index 1991eb22e388a175484bacf079ba088ae2d226a6..bb73b2222627b9b6a05f96057da351c7d417f935 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -65,6 +65,7 @@
 #include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 #include <linux/mbus.h>
+#include <linux/bitops.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -545,6 +546,8 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
 static int  mv_softreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline);
 static void mv_pmp_error_handler(struct ata_port *ap);
+static void mv_process_crpb_entries(struct ata_port *ap,
+                                       struct mv_port_priv *pp);
 
 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
  * because we have to allow room for worst case splitting of
@@ -901,6 +904,10 @@ static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
 
        /*
         * Wait for the EDMA engine to finish transactions in progress.
+        * No idea what a good "timeout" value might be, but measurements
+        * indicate that it often requires hundreds of microseconds
+        * with two drives in-use.  So we use the 15msec value above
+        * as a rough guess at what even more drives might require.
         */
        for (i = 0; i < timeout; ++i) {
                u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
@@ -1156,6 +1163,10 @@ static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
        if (want_fbs) {
                new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
                new_ltmode = old_ltmode | LTMODE_BIT8;
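+               /*
+                * With NCQ, drop EDMA_ERR_DEV from the halt conditions so
+                * the EDMA keeps running after a device error (responses
+                * are drained in mv_handle_fbs_ncq_dev_err).  Without NCQ,
+                * set FISCFG_WAIT_DEV_ERR instead.
+                */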
+               if (want_ncq)
+                       new_haltcond &= ~EDMA_ERR_DEV;
+               else
+                       new_fiscfg |=  FISCFG_WAIT_DEV_ERR;
        }
 
        if (new_fiscfg != old_fiscfg)
@@ -1627,6 +1638,143 @@ static void mv_pmp_error_handler(struct ata_port *ap)
        sata_pmp_error_handler(ap);
 }
 
+static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
+{
+       void __iomem *port_mmio = mv_ap_base(ap);
+
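+       /* Upper 16 bits: bitmap of PMP links which saw device errors */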
+       return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
+}
+
+static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
+{
+       struct ata_eh_info *ehi;
+       unsigned int pmp;
+
+       /*
+        * Initialize EH info for PMPs which saw device errors
+        */
+       ehi = &ap->link.eh_info;
+       for (pmp = 0; pmp_map != 0; pmp++) {
+               unsigned int this_pmp = (1 << pmp);
+               if (pmp_map & this_pmp) {
+                       struct ata_link *link = &ap->pmp_link[pmp];
+
+                       pmp_map &= ~this_pmp;
+                       ehi = &link->eh_info;
+                       ata_ehi_clear_desc(ehi);
+                       ata_ehi_push_desc(ehi, "dev err");
+                       ehi->err_mask |= AC_ERR_DEV;
+                       ehi->action |= ATA_EH_RESET;
+                       ata_link_abort(link);
+               }
+       }
+}
+
+static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
+{
+       struct mv_port_priv *pp = ap->private_data;
+       int failed_links;
+       unsigned int old_map, new_map;
+
+       /*
+        * Device error during FBS+NCQ operation:
+        *
+        * Set a port flag to prevent further I/O being enqueued.
+        * Leave the EDMA running to drain outstanding commands from this port.
+        * Perform the post-mortem/EH only when all responses are complete.
+        * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
+        */
+       if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
+               pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
+               pp->delayed_eh_pmp_map = 0;
+       }
+       old_map = pp->delayed_eh_pmp_map;
+       new_map = old_map | mv_get_err_pmp_map(ap);
+
+       if (old_map != new_map) {
+               pp->delayed_eh_pmp_map = new_map;
+               mv_pmp_eh_prep(ap, new_map & ~old_map);
+       }
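+       /* Count how many PMP links now have a failed command */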
+       failed_links = hweight16(new_map);
+
+       ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
+                       "failed_links=%d nr_active_links=%d\n",
+                       __func__, pp->delayed_eh_pmp_map,
+                       ap->qc_active, failed_links,
+                       ap->nr_active_links);
+
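+       /*
+        * Defer the rest of the EH until every remaining active link
+        * has failed; then drain the outstanding responses, stop the
+        * EDMA, and freeze the port so that normal EH can take over.
+        */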
+       if (ap->nr_active_links <= failed_links) {
+               mv_process_crpb_entries(ap, pp);
+               mv_stop_edma(ap);
+               mv_eh_freeze(ap);
+               ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
+               return 1;       /* handled */
+       }
+       ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
+       return 1;       /* handled */
+}
+
+static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
+{
+       /*
+        * Possible future enhancement:
+        *
+        * FBS+non-NCQ operation is not yet implemented.
+        * See related notes in mv_edma_cfg().
+        *
+        * Device error during FBS+non-NCQ operation:
+        *
+        * We need to snapshot the shadow registers for each failed command.
+        * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
+        */
+       return 0;       /* not handled */
+}
+
+static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
+{
+       struct mv_port_priv *pp = ap->private_data;
+
+       if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+               return 0;       /* EDMA was not active: not handled */
+       if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
+               return 0;       /* FBS was not active: not handled */
+
+       if (!(edma_err_cause & EDMA_ERR_DEV))
+               return 0;       /* non DEV error: not handled */
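+       /* Mask off transient irq causes before checking for other problems */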
+       edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
+       if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
+               return 0;       /* other problems: not handled */
+
+       if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
+               /*
+                * EDMA should NOT have self-disabled for this case.
+                * If it did, then something is wrong elsewhere,
+                * and we cannot handle it here.
+                */
+               if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+                       ata_port_printk(ap, KERN_WARNING,
+                               "%s: err_cause=0x%x pp_flags=0x%x\n",
+                               __func__, edma_err_cause, pp->pp_flags);
+                       return 0; /* not handled */
+               }
+               return mv_handle_fbs_ncq_dev_err(ap);
+       } else {
+               /*
+                * EDMA should have self-disabled for this case.
+                * If it did not, then something is wrong elsewhere,
+                * and we cannot handle it here.
+                */
+               if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
+                       ata_port_printk(ap, KERN_WARNING,
+                               "%s: err_cause=0x%x pp_flags=0x%x\n",
+                               __func__, edma_err_cause, pp->pp_flags);
+                       return 0; /* not handled */
+               }
+               return mv_handle_fbs_non_ncq_dev_err(ap);
+       }
+       return 0;       /* not handled */
+}
+
 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
 {
        struct ata_eh_info *ehi = &ap->link.eh_info;
@@ -1683,6 +1831,15 @@ static void mv_err_intr(struct ata_port *ap)
        ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
                        __func__, edma_err_cause, pp->pp_flags);
 
+       if (edma_err_cause & EDMA_ERR_DEV) {
+               /*
+                * Device errors during FIS-based switching operation
+                * require special handling.
+                */
+               if (mv_handle_dev_err(ap, edma_err_cause))
+                       return;
+       }
+
        qc = mv_get_active_qc(ap);
        ata_ehi_clear_desc(ehi);
        ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
@@ -1861,6 +2018,8 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
         */
        if (edma_was_enabled && (port_cause & DONE_IRQ)) {
                mv_process_crpb_entries(ap, pp);
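+               /*
+                * If a delayed FBS+NCQ device-error EH is pending, check
+                * whether all failed links have now drained so it can run.
+                */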
+               if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+                       mv_handle_fbs_ncq_dev_err(ap);
        }
        /*
         * Handle chip-reported errors, or continue on to handle PIO.