/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.0"
enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL = 0x1046c,
	MV_GPIO_PORT_CTL = 0x104f0,
	MV_RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 176,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
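	/* Sanity check on the carve-up above (added for illustration, not
	 * part of the original source): with MV_MAX_Q_DEPTH == 32 and
	 * MV_MAX_SG_CT == 176,
	 *
	 *	MV_CRQB_Q_SZ        = 32 * 32  = 1024 bytes (1KB)
	 *	MV_CRPB_Q_SZ        =  8 * 32  =  256 bytes (256B)
	 *	MV_SG_TBL_SZ        = 16 * 176 = 2816 bytes
	 *	MV_PORT_PRIV_DMA_SZ = 1024 + 256 + 2816 = 4096 bytes (4KB)
	 *
	 * which matches the "4KB" claim in the comment block above.
	 */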
	MV_PORTS_PER_HC = 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT = 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK = 3,
	/* Host Flags */
	MV_FLAG_DUAL_HC = (1 << 30),		/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
			  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_TAG_SHIFT = 1,
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS = 0xc00,

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE = 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */
	HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	PORT0_ERR = (1 << 0),	/* shift by port # */
	PORT0_DONE = (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR = (1 << 18),
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
			       HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 HC_MAIN_RSVD_5),
	/* SATAHC registers */
	HC_IRQ_CAUSE_OFS = 0x14,
	CRPB_DMA_DONE = (1 << 0),	/* shift by port # */
	HC_IRQ_COAL = (1 << 4),		/* IRQ coalescing */
	DEV_IRQ = (1 << 8),		/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS = 0x100,
	SHD_CTL_AST_OFS = 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	PHY_MODE3 = 0x310,
	PHY_MODE4 = 0x314,
	PHY_MODE2 = 0x330,
	MV5_PHY_MODE = 0x74,
	MV5_LT_MODE = 0x30,
	MV5_PHY_CTL = 0x0C,
	SATA_INTERFACE_CTL = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,
	EDMA_CFG_OFS = 0,
	EDMA_CFG_Q_DEPTH = 0,			/* queueing disabled */
	EDMA_CFG_NCQ = (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),
	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_PRD_PAR |
			 EDMA_ERR_DEV_DCON |
			 EDMA_ERR_DEV_CON |
			 EDMA_ERR_SERR |
			 EDMA_ERR_SELF_DIS |
			 EDMA_ERR_CRQB_PAR |
			 EDMA_ERR_CRPB_PAR |
			 EDMA_ERR_INTRL_PAR |
			 EDMA_ERR_IORDY |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_PRD_PAR |
			   EDMA_ERR_DEV_DCON |
			   EDMA_ERR_DEV_CON |
			   EDMA_ERR_OVERRUN_5 |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
			   EDMA_ERR_CRQB_PAR |
			   EDMA_ERR_CRPB_PAR |
			   EDMA_ERR_INTRL_PAR |
			   EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,
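	/* Note (added for clarity, not in the original source): each queue
	 * pointer shift equals log2 of the entry size, since the index
	 * field sits just above the byte offset within an entry -- CRQBs
	 * are 32 bytes (shift 5), CRPBs are 8 bytes (shift 3).
	 */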
	EDMA_CMD_OFS = 0x28,		/* EDMA command register */
	EDMA_EN = (1 << 0),		/* enable EDMA */
	EDMA_DS = (1 << 1),		/* disable EDMA; self-negated */
	ATA_RST = (1 << 2),		/* reset trans/link/phy */

	EDMA_IORDY_TMOUT = 0x34,
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),		/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET = (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	MV_DMA_BOUNDARY = 0xffffffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32 sg_addr;
	__le32 sg_addr_hi;
	__le16 ctrl_flags;
	__le16 ata_cmd[11];
};

struct mv_crqb_iie {
	__le32 addr;
	__le32 addr_hi;
	__le32 flags;
	__le32 len;
	__le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16 id;
	__le16 flags;
	__le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32 addr;
	__le32 flags_size;
	__le32 addr_hi;
	__le32 reserved;
};
struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl;
	dma_addr_t sg_tbl_dma;

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
};
struct mv_port_signal {
	u32 amps;
	u32 pre;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32 hp_flags;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
};
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static struct scsi_host_template mv5_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = MV_MAX_SG_CT,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};
static struct scsi_host_template mv6_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = MV_MAX_SG_CT,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = 1,
	.proc_name = DRV_NAME,
	.dma_boundary = MV_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.error_handler = mv_error_handler,
	.post_internal_cmd = mv_post_int_cmd,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};
static const struct ata_port_operations mv6_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.error_handler = mv_error_handler,
	.post_internal_cmd = mv_post_int_cmd,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.port_disable = ata_port_disable,

	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_status = ata_check_status,
	.exec_command = ata_exec_command,
	.dev_select = ata_std_dev_select,

	.cable_detect = ata_cable_sata,

	.qc_prep = mv_qc_prep_iie,
	.qc_issue = mv_qc_issue,
	.data_xfer = ata_data_xfer,

	.irq_clear = mv_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,

	.error_handler = mv_error_handler,
	.post_internal_cmd = mv_post_int_cmd,
	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,

	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_COMMON_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name = DRV_NAME,
	.id_table = mv_pci_tbl,
	.probe = mv_init_one,
	.remove = ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
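/* Added note: the dummy readl() forces the preceding MMIO write out of
 * any PCI posting buffers before we proceed -- reads are never posted,
 * so they cannot complete ahead of the write.
 */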
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
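/* Worked example (added for illustration): host port 6 maps to
 * hc = 6 >> 2 = 1 and hardport = 6 & 3 = 2, i.e. the third port on the
 * second SATA host controller of a dual-HC (8-port) chip.
 */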
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
	       MV_SATAHC_ARBTR_REG_SZ +
	       (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
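/* Worked example (added for illustration): for port 5,
 *	hc base     = 0x20000 + 1 * 0x10000 = 0x30000
 *	+ arbiter   = 0x30000 + 0x2000      = 0x32000
 *	+ hardport  = 0x32000 + 1 * 0x2000  = 0x34000
 * so port 5's register window starts 0x34000 bytes into BAR 0.
 */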
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
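/* Added decode example (not in the original source): a request-queue
 * pointer register packs the queue base and the ring index into one
 * 32-bit word.  With a 1KB-aligned crqb_dma and req_idx == 3, the value
 * written above is
 *	(crqb_dma & 0xfffffc00) | (3 << 5)
 * bits [31:10] = base, bits [9:5] = slot index, bits [4:0] = byte
 * offset within the 32-byte CRQB (always zero here).
 */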
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @hpriv: host private data
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
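/* Added mapping note: with SCR_STATUS == 0, SCR_ERROR == 1 and
 * SCR_CONTROL == 2, the formula above yields offsets 0x300, 0x304 and
 * 0x308 respectively; only SCR_ACTIVE (3) breaks the pattern and lives
 * at 0x350.
 */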
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			mv_sg++;
			n_sg++;
		}
	}

	return n_sg;
}
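/* Worked example (added for illustration): an S/G segment with
 * addr == 0x1fff00 and sg_len == 0x300 crosses a 64KB boundary, so the
 * loop above emits two ePRDs: one covering 0x100 bytes (up to the
 * boundary) and one covering the remaining 0x200 bytes.
 */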
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
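/* Added note: mv_crqb_pack_cmd() packs one shadow-register update into
 * a halfword: the data byte in bits 7:0, the register address shifted
 * to bit 8, the fixed CRQB_CMD_CS control bits (0x2 << 11), and
 * CRQB_CMD_LAST (bit 15) on the final word of the command.
 */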
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
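/* Added note: the EDMA engine compares the in- and out-pointers; making
 * them unequal (by advancing req_idx and rewriting the in-pointer
 * above) is what starts fetch and execution of the freshly built CRQB.
 */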
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected command (non-NCQ), if any
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
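/* Added note: only the software out-pointer is written back, and only
 * once per interrupt (after the loop), so the hardware sees all of the
 * consumed CRPB slots released in a single posted write.
 */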
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m4, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		msleep(500);

		if (time_after(jiffies, deadline))
			break;
	} while (1);

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap, 0, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &ap->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_port_online(ap))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serr;

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	sata_scr_read(ap, SCR_ERROR, &serr);
	sata_scr_write_flush(ap, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
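/*
 * Editor's note (explanatory addition): the main IRQ cause/mask register
 * allots two consecutive bits per port ("err" and "done"), and the field
 * for the second host controller (ports 4-7) starts one bit higher,
 * skipping the intervening bit that appears to carry the HC0 coalescing
 * event (hence the FIXME above).  Worked example: port 1 (HC0) gives
 * shift = 1 * 2 = 2, mask = 0x3 << 2 = 0xc; port 5 (HC1) gives
 * shift = 5 * 2 + 1 = 11, mask = 0x3 << 11 = 0x1800.
 */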
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
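/*
 * Editor's note (explanatory addition): unlike the global mask register,
 * each host controller's own HC_IRQ_CAUSE register indexes only its four
 * ports, so ports 4-7 are renumbered 0-3 via "hc_port_no -= 4" before
 * the per-port CRPB-done (bit N) and device-interrupt (bit N + 8) events
 * are cleared.
 */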
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
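/*
 * Editor's note (explanatory addition): the shadow register block exposes
 * the taskfile registers on 32-bit boundaries, hence the sizeof(u32)
 * stride.  With the standard ATA_REG_* indices (DATA = 0 through
 * STATUS = 7) the layout works out to: data at +0x00, error/feature at
 * +0x04, nsect at +0x08, lbal/lbam/lbah at +0x0c/+0x10/+0x14, device at
 * +0x18, and status/command at +0x1c.
 */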
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n",
		       board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
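/*
 * Editor's note (explanatory addition): unknown silicon revisions are
 * deliberately given the workaround set of the newest revision the
 * driver knows about for that family -- a conservative default, since
 * missing a needed erratum fix is worse than applying a harmless one.
 * The MV_HP_GEN_* flag recorded here is what the IS_GEN_I/II/IIE()
 * tests key off throughout the driver.
 */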
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
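/*
 * Editor's note (explanatory addition): the ordering above matters.
 * All main-cause interrupts are masked first so that nothing fires
 * while the chip is identified, the PHYs are brought up, and each
 * port's pending events are cleared; only after every source has been
 * cleaned up are the PCI and main IRQ masks opened again.
 */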
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class code to report whether the chip is
	 * operating in SCSI or RAID mode.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
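/*
 * Editor's note (hypothetical example output): on a 4-port Gen-II board
 * in SCSI mode without MSI, the line above would read something like:
 *
 *   sata_mv 0000:02:00.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 *
 * where 32 is MV_MAX_Q_DEPTH and the PCI address is illustrative.
 */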
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
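/*
 * Editor's note (explanatory addition): the probe path relies entirely on
 * managed ("devres") resources -- devm_kzalloc(), pcim_enable_device(),
 * pcim_iomap_regions() -- so every error leg can simply return an error
 * code and the driver core will unwind whatever was acquired.  That is
 * why there is no explicit free/unmap/disable cleanup in this function.
 */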
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
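/*
 * Editor's note (hypothetical usage example): the msi parameter is set
 * at load time, e.g.:
 *
 *   modprobe sata_mv msi=1
 *
 * and is visible afterwards via /sys/module/sata_mv/parameters/msi
 * (mode 0444, so readable but not changeable once loaded).
 */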
module_init(mv_init);
module_exit(mv_exit);