/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  added latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
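	/* Worked check of the total, for the curious (illustrative note,
	 * not in the original source): 32 CRQBs * 32B = 1024B, 32 CRPBs
	 * * 8B = 256B, and 176 ePRDs * 16B = 2816B; 1024 + 256 + 2816 =
	 * 4096B, i.e. the 4KB cited in the comment above.
	 */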
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,
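	/* Example of the mapping (illustrative): global port 6 lives on
	 * HC 6 >> 2 == 1, at hard port 6 & 3 == 2 within that HC.
	 */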
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x70a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
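/* Worked example (illustrative, not in the original source): for global
 * port 5, mv_port_base() yields base + 0x20000 (SATAHC0) + 0x10000 (HC 1)
 * + 0x2000 (arbiter) + 1 * 0x2000 (hard port 1) == base + 0x34000.
 */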
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
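/* Pointer math, spelled out (illustrative note): the CRQB ring is
 * 1KB-aligned, so bits 9:0 of the register are free to carry the
 * 32-entry index; each CRQB is 32B, hence index = (req_idx & 31) << 5.
 * Likewise the 256B-aligned CRPB ring uses (resp_idx & 31) << 3 for
 * its 8B entries.
 */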
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
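/* For reference (illustrative): with the libata SCR indices, SCR_STATUS(0)
 * maps to 0x300, SCR_ERROR(1) to 0x304 and SCR_CONTROL(2) to 0x308, while
 * SCR_ACTIVE sits off on its own at 0x350 as noted above.
 */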
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;
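	/* Resulting layout of the single coherent chunk (illustrative):
	 *   offset 0x000: 32 * 32B CRQBs  (1KB)
	 *   offset 0x400: 32 *  8B CRPBs  (256B)
	 *   offset 0x500: 176 * 16B ePRDs (2816B)
	 */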
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
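/* Worked example of the 64KB split above (illustrative, not in the
 * original source): a 0x8000-byte segment whose DMA address ends in
 * offset 0xc000 becomes two ePRDs, one of 0x4000 bytes up to the 64KB
 * boundary and one for the remaining 0x4000 bytes.
 */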
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
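/* Layout of a packed command word (illustrative summary): register data
 * in bits 7:0, register address shifted up by CRQB_CMD_ADDR_SHIFT (bit 8),
 * CRQB_CMD_CS contributing bit 12, and CRQB_CMD_LAST (bit 15) marking the
 * final word of the CRQB's ATA command list.
 */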
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: active queued command, if any (may be NULL)
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);
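		/* e.g. (illustrative): port 5 has shift 5*2 + 1 == 11, so
		 * its PORT0_ERR/PORT0_DONE bits land at bits 11 and 12 of
		 * the main cause register.
		 */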
		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host     *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		msleep(500);

		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp	= ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */
	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;
	mask = 0x3 << shift;
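	/* In the main IRQ mask, each port owns an (err, done) bit pair:
	 * bits 0-7 for ports 0-3, then bits 9-16 for ports 4-7, since
	 * bit 8 is HC0's coalescing-done bit -- hence the extra shift
	 * for ports on the second host controller.
	 */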
	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}
	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
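	/* (Each SATAHC's cause register indexes only the four ports of
	 * that HC: bits 0-3 are CRPB-done and bits 8-11 are device
	 * interrupts, which is why hc_port_no is reduced by 4 above
	 * for ports 4-7.)
	 */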
	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310)) {
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
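			 *
			 * Worked example (made-up capacity): a drive
			 * reporting 234441648 sectors gives
			 * 234441648 & ~0xfffff == 233832448, so the BIOS
			 * metadata lands at sector 233832448, roughly
			 * 300 MB below the end of the drive.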
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: the 7042 shares the 6042's GEN_IIE setup */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
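	/* (The 50xx and 6xxx generations lay out the main cause register
	 * differently, so each gets its own "always masked" constant.)
	 */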
	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the storage sub-class code to learn whether the chip
	 * presents itself as a SCSI or a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts: fall back to legacy INTx if MSI setup fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);