/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
  probing/error handling in general.  MUST HAVE.

  3) Add hotplug support (easy, once new-EH support appears)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),

	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
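	/*
	 * Worked check of the sizes above (illustrative, assuming
	 * MV_MAX_Q_DEPTH == 32 and MV_MAX_SG_CT == 176 per the comment):
	 * 32 * 32B (CRQB) + 32 * 8B (CRPB) + 176 * 16B (ePRD)
	 * = 1024 + 256 + 2816 = 4096 bytes, i.e. one 4KB page per port.
	 */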
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),

	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */

	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	/* SATAHC registers */

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),

#define IS_50XX(hpriv)		((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)		(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return rc;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
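/*
 * Usage note (illustrative): writelfl() is used below wherever a
 * register write must reach the chip before execution continues, e.g.
 *
 *	writelfl(EDMA_EN, base + EDMA_CMD_OFS);
 *
 * because reading the same address back forces any posted PCI write
 * buffers to drain.
 */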
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
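/*
 * Worked example (illustrative, assuming an 8-port chip with four ports
 * per HC): for port 5, mv_hc_from_port() gives 5 >> 2 == 1 and
 * mv_hardport_from_port() gives 5 & 3 == 1, so the port registers start
 * at
 *	base + MV_SATAHC0_REG_BASE + 1 * MV_SATAHC_REG_SZ
 *	     + MV_SATAHC_ARBTR_REG_SZ + 1 * MV_PORT_REG_SZ
 * == base + 0x20000 + 0x10000 + 0x2000 + 0x2000 == base + 0x34000.
 */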
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	/* initialize request queue */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/* initialize response queue */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
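/*
 * Note on the (dma >> 16) >> 16 idiom above (also used by mv_fill_sg()
 * and the qc_prep routines): it extracts the upper 32 bits of a
 * dma_addr_t without triggering a "shift count >= width of type"
 * warning when dma_addr_t is only 32 bits wide, in which case the
 * result is simply 0.
 */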
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * Inherited from caller.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	start_hc = start_port = 0;
	num_ports = 8;		/* should be benign for 4 port devs */

	start_hc = port >> MV_PORT_HC_SHIFT;

	num_ports = num_hcs = 1;

	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));

		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
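/*
 * Illustrative mapping from mv_scr_offset(), assuming the standard
 * libata SCR numbering (SCR_STATUS == 0, SCR_ERROR == 1,
 * SCR_CONTROL == 2): SStatus lands on SATA_STATUS_OFS (0x300), SError
 * on 0x304 and SControl on 0x308, while SCR_ACTIVE is special-cased to
 * SATA_ACTIVE_OFS (0x350).  Anything else yields the 0xffffffffU
 * "no such register" cookie that the two callers above test for.
 */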
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);

	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* 32-slot command response table (CRPB), 8 bytes each in size */
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Table of scatter-gather descriptors (ePRD), 16 bytes each */
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
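/*
 * Example (illustrative, assuming MV_MAX_Q_DEPTH == 32): the mask makes
 * the queue index wrap around the ring, so mv_inc_q_index(31) ==
 * (31 + 1) & 31 == 0.
 */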
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
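/*
 * Worked example (illustrative, assuming ATA_REG_CMD == 7 as in
 * <linux/ata.h>): packing ATA_CMD_READ (0xc8) into the command register
 * as the final word gives
 *	0xc8 | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST
 * == 0x00c8 | 0x0700 | 0x1000 | 0x8000 == 0x97c8, stored little-endian
 * in the CRQB.
 */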
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		   >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		   >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |

	crqb->ata_cmd[1] = cpu_to_le32(

	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			      >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, hpriv, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
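/*
 * Illustrative layout of the pointer register written above: bits 31:10
 * hold the queue base (EDMA_REQ_Q_BASE_LO_MASK == 0xfffffc00) and bits
 * 9:5 (EDMA_REQ_Q_PTR_SHIFT == 5) hold the 0-31 producer slot, so
 * bumping slot 3 to slot 4 rewrites only that 5-bit field.
 */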
/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We check indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			       >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp = ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int port, port0;
	int shift, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
}
/**
 * mv_interrupt - Main interrupt event handler
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read-only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {

		ofs = sc_reg_in * sizeof(u32);

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);

	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);

	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);

		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);

		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);

	tmp = readl(hc_mmio + 0x20);

	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);

	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);

	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {

		if (PCI_MASTER_EMPTY & t) {

	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");

	do {
		writel(t | GLOB_SFT_RST, reg);

	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");

	/* clear reset and *re-enable the PCI master* (not mentioned in spec) */
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);

	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m4, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);

		writel(m2, port_mmio + PHY_MODE2);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);

	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}

static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long deadline;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	deadline = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {

	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
	} while (time_before(jiffies, deadline));

	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
}
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
	port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
	port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	hpriv->ops = &mv5xxx_ops;
	hp_flags |= MV_HP_50XX;

	hp_flags |= MV_HP_ERRATA_50XXB0;

	hp_flags |= MV_HP_ERRATA_50XXB2;

	dev_printk(KERN_WARNING, &pdev->dev,
		   "Applying 50XXB2 workarounds to unknown rev\n");
	hp_flags |= MV_HP_ERRATA_50XXB2;

	hpriv->ops = &mv5xxx_ops;
	hp_flags |= MV_HP_50XX;

	hp_flags |= MV_HP_ERRATA_50XXB0;

	hp_flags |= MV_HP_ERRATA_50XXB2;

	dev_printk(KERN_WARNING, &pdev->dev,
		   "Applying B2 workarounds to unknown rev\n");
	hp_flags |= MV_HP_ERRATA_50XXB2;

	hpriv->ops = &mv6xxx_ops;

	hp_flags |= MV_HP_ERRATA_60X1B2;

	hp_flags |= MV_HP_ERRATA_60X1C0;

	dev_printk(KERN_WARNING, &pdev->dev,
		   "Applying B2 workarounds to unknown rev\n");
	hp_flags |= MV_HP_ERRATA_60X1B2;

	hpriv->ops = &mv6xxx_ops;

	hp_flags |= MV_HP_GEN_IIE;

	hp_flags |= MV_HP_ERRATA_XX42A0;

	hp_flags |= MV_HP_ERRATA_60X1C0;

	dev_printk(KERN_WARNING, &pdev->dev,
		   "Applying 60X1C0 workarounds to unknown rev\n");
	hp_flags |= MV_HP_ERRATA_60X1C0;

	printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id, scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);

	else if (scc == 0x01)

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);

	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
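/*
 * Example usage (illustrative): "modprobe sata_mv msi=1" requests PCI
 * MSI at load time; the default of 0 keeps legacy INTx interrupts.
 */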
module_init(mv_init);
module_exit(mv_exit);