/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
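/*
 * Register-map orientation for the constants below: BAR 0 is carved
 * into 64KB "major" regions (the PCI registers first, then one region
 * per SATA host controller starting at MV_SATAHC0_REG_BASE), and each
 * HC region is carved into 8KB "minor" windows: one for the HC
 * arbiter, then one per hard port.  The enums give offsets within
 * those windows.
 */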
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};
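/*
 * mv_hw_ops is a small per-generation vtable: the probe path points
 * mv_host_priv.ops at mv5xxx_ops (Gen I, 50xx) or mv6xxx_ops (Gen
 * II/IIE, 60xx/6042/7042), both defined below, so common code never
 * has to test the chip generation for these setup/reset operations.
 */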
static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
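/*
 * mv_port_info[] is indexed by the chip_type enum; the PCI ID table
 * below carries the chip_type in driver_data, which mv_init_one()
 * uses to select the matching entry here at probe time.
 */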
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
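/*
 * Note the fallback ladder above: a 64-bit streaming DMA mask is tried
 * first, with the consistent (coherent) mask dropped to 32 bits if the
 * 64-bit one cannot be satisfied; if 64-bit DMA fails entirely, both
 * masks fall back to 32 bits, and any hard failure aborts the probe.
 */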
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
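/*
 * The read-back in writelfl() ("write long, flush") is the standard
 * idiom for flushing posted MMIO writes: a read from the device cannot
 * complete until earlier posted writes to it have landed, so the write
 * is guaranteed to have reached the chip when writelfl() returns.
 */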
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
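/*
 * Putting the helpers above together, the register window for global
 * port number N lives at:
 *
 *   BAR0 + 0x20000 + (N / 4) * 0x10000   (the port's host controller)
 *        + 0x2000                        (skip the HC arbiter window)
 *        + (N % 4) * 0x2000              (the hard port within that HC)
 */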
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
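/*
 * Two details in mv_set_edma_ptrs() are worth noting: the WARN_ONs
 * assert the 1KB/256B ring alignment the hardware requires (the CRQB
 * and CRPB rings come from dma_pools created with exactly those
 * alignments), and "(dma >> 16) >> 16" extracts the upper 32 bits of
 * a DMA address without invoking a ">> 32", which would be undefined
 * on platforms where dma_addr_t is only 32 bits wide.
 */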
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
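/*
 * While EDMA is enabled, the chip owns the port and the shadow
 * taskfile registers are not safely accessible.  That is why EDMA is
 * switched on only here, immediately before queued DMA is issued, and
 * why the non-EDMA paths (see mv_qc_issue) stop it first.
 */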
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}
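/*
 * The clamp above ties in with mv_qc_prep(): the Gen-II CRQB has room
 * for only 11 register writes, and hob_nsect is one of the fields
 * dropped for NCQ commands, so transfer size is limited here to keep
 * the dropped byte irrelevant in practice.
 */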
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
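/*
 * The inner loop above splits any segment that spans a 64KB address
 * boundary, since the ePRD byte-count field is only 16 bits wide.
 * "len & 0xffff" stores a full 0x10000-byte piece as zero, which the
 * EDMA necessarily treats as 64KB for this scheme to work; and since
 * worst-case splitting can double the entry count, the host templates
 * advertise .sg_tablesize as MV_MAX_SG_CT / 2.
 */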
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
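/*
 * Each 16-bit CRQB command word packed above is effectively a little
 * register-write instruction for the EDMA: the data byte in the low
 * bits, the shadow register address at CRQB_CMD_ADDR_SHIFT, the
 * CRQB_CMD_CS control-select bits, and CRQB_CMD_LAST marking the
 * final word of the list.
 */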
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
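/*
 * Queue-kick protocol: software owns req_idx, hardware owns the
 * response side.  Bumping req_idx and writing it (together with the
 * ring base, which shares this register) into the request-queue
 * in-pointer is what tells the EDMA a new CRQB is ready; completions
 * arrive asynchronously through the CRPB ring in mv_intr_edma().
 */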
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command (non-NCQ), if any
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
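/*
 * Note the batching above: the hardware in-pointer is sampled once,
 * the software out-pointer walks forward consuming CRPBs until the
 * two meet, and only then is the out-pointer register written back,
 * giving one MMIO write per interrupt rather than one per command.
 */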
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host     *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: resulting device class, if any
 * @deadline: deadline jiffies for the operation
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
                         unsigned long deadline)
{
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = mv_ap_base(ap);
        int retry = 5;
        u32 sstatus;

        VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
        {
                u32 sstatus, serror, scontrol;

                mv_scr_read(ap, SCR_STATUS, &sstatus);
                mv_scr_read(ap, SCR_ERROR, &serror);
                mv_scr_read(ap, SCR_CONTROL, &scontrol);
                DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
                        "SCtrl 0x%08x\n", sstatus, serror, scontrol);
        }
#endif

        /* Issue COMRESET via SControl */
comreset_retry:
        sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
        msleep(1);

        sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
        msleep(20);

        do {
                sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
                if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
                        break;
                msleep(1);
        } while (time_before(jiffies, deadline));

        /* work around errata */
        if (IS_GEN_II(hpriv) &&
            (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
            (retry-- > 0))
                goto comreset_retry;

#ifdef DEBUG
        {
                u32 sstatus, serror, scontrol;

                mv_scr_read(ap, SCR_STATUS, &sstatus);
                mv_scr_read(ap, SCR_ERROR, &serror);
                mv_scr_read(ap, SCR_CONTROL, &scontrol);
                DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
                        "SCtrl 0x%08x\n", sstatus, serror, scontrol);
        }
#endif

        if (ata_link_offline(&ap->link)) {
                *class = ATA_DEV_NONE;
                return;
        }

        /* even after SStatus reflects that device is ready,
         * it seems to take a while for link to be fully
         * established (and thus Status no longer 0x80/0x7F),
         * so we poll a bit for that, here.
         */
        retry = 20;
        while (1) {
                u8 drv_stat = ata_check_status(ap);
                if ((drv_stat != 0x80) && (drv_stat != 0x7f))
                        break;
                msleep(500);
                if (retry-- <= 0)
                        break;
                if (time_after(jiffies, deadline))
                        break;
        }

        /* FIXME: if we passed the deadline, the following
         * code probably produces an invalid result
         */

        /* finally, read device signature from TF registers */
        *class = ata_dev_try_classify(ap->link.device, 1, NULL);

        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

        VPRINTK("EXIT\n");
}
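/*
 * The SStatus values in the Gen-II errata check above decode as
 * follows (DET = bits 3:0, SPD = bits 7:4, IPM = bits 11:8):
 *
 *      0x000   no device detected, PHY offline
 *      0x113   device present, Gen1 speed, interface active
 *      0x123   device present, Gen2 speed, interface active
 *
 * Any other value after COMRESET is treated as suspect and retried.
 */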
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct mv_port_priv *pp = ap->private_data;
        struct ata_eh_context *ehc = &link->eh_context;
        int rc;

        rc = mv_stop_dma(ap);
        if (rc)
                ehc->i.action |= ATA_EH_HARDRESET;

        if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
                pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
                ehc->i.action |= ATA_EH_HARDRESET;
        }

        /* if we're about to do hardreset, nothing more to do */
        if (ehc->i.action & ATA_EH_HARDRESET)
                return 0;

        if (ata_link_online(link))
                rc = ata_wait_ready(ap, deadline);
        else
                rc = -ENODEV;
        return rc;
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

        mv_stop_dma(ap);
        mv_channel_reset(hpriv, mmio, ap->port_no);
        mv_phy_reset(ap, class, deadline);
        return 0;
}
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
        struct ata_port *ap = link->ap;
        u32 serr;

        /* print link status */
        sata_print_link_status(link);

        /* clear SError */
        sata_scr_read(link, SCR_ERROR, &serr);
        sata_scr_write_flush(link, SCR_ERROR, serr);

        /* bail out if no device is present */
        if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
                DPRINTK("EXIT, no device\n");
                return;
        }

        /* set up device control */
        iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
        ata_do_eh(ap, mv_prereset, ata_std_softreset,
                  mv_hardreset, mv_postreset);
}
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
        mv_stop_dma(qc->ap);
}
static void mv_eh_freeze(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
        unsigned int hc = (ap->port_no > 3) ? 1 : 0;
        u32 tmp, mask;
        unsigned int shift;

        /* FIXME: handle coalescing completion events properly */

        shift = ap->port_no * 2;
        if (hc > 0)
                shift++;
        mask = 0x3 << shift;

        /* disable assertion of portN err, done events */
        tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
        writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
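/*
 * The main-cause register allots two bits per port, with one extra
 * pad bit once we cross into HC1.  A hypothetical helper (not used by
 * the driver) showing the same shift math as mv_eh_freeze/thaw:
 */
static inline u32 mv_port_irq_mask_example(unsigned int port_no)
{
        unsigned int shift = port_no * 2;

        if (port_no > 3)        /* ports 4-7 live on HC1 */
                shift++;
        return 0x3 << shift;    /* e.g. port 5: 0x3 << 11 = 0x1800 */
}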
static void mv_eh_thaw(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
        unsigned int hc = (ap->port_no > 3) ? 1 : 0;
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 tmp, mask, hc_irq_cause;
        unsigned int shift, hc_port_no = ap->port_no;

        /* FIXME: handle coalescing completion events properly */

        shift = ap->port_no * 2;
        if (hc > 0) {
                shift++;
                hc_port_no -= 4;
        }
        mask = 0x3 << shift;

        /* clear EDMA errors on this port */
        writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* clear pending irq events */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        hc_irq_cause &= ~(1 << hc_port_no);       /* clear CRPB-done */
        hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
        writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

        /* enable assertion of portN err, done events */
        tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
        writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
        void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
        unsigned serr_ofs;

        /* PIO related setup */
        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
        port->error_addr =
                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
        port->status_addr =
                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
        /* special case: control/altstatus doesn't have ATA_REG_ address */
        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

        /* unused: */
        port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

        /* Clear any currently outstanding port interrupt conditions */
        serr_ofs = mv_scr_offset(SCR_ERROR);
        writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* unmask all non-transient EDMA error interrupts */
        writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
                readl(port_mmio + EDMA_CFG_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
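/*
 * Resulting shadow-register map (each register is a 32-bit word from
 * shd_base, per the ATA_REG_* indices used above):
 *
 *      data   +0x00    error/feature  +0x04    nsect +0x08
 *      lbal   +0x0c    lbam           +0x10    lbah  +0x14
 *      device +0x18    status/command +0x1c
 *      ctl/altstatus at shd_base + SHD_CTL_AST_OFS
 */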
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u32 hp_flags = hpriv->hp_flags;

        switch (board_idx) {
        case chip_5080:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_GEN_I;

                switch (pdev->revision) {
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying 50XXB2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;

        case chip_504x:
        case chip_508x:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_GEN_I;

                switch (pdev->revision) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;

        case chip_604x:
        case chip_608x:
                hpriv->ops = &mv6xxx_ops;
                hp_flags |= MV_HP_GEN_II;

                switch (pdev->revision) {
                case 0x7:
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                case 0x9:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                }
                break;

        case chip_7042:
                hp_flags |= MV_HP_PCIE;
                if (pdev->vendor == PCI_VENDOR_ID_TTI &&
                    (pdev->device == 0x2300 || pdev->device == 0x2310))
                {
                        /*
                         * Highpoint RocketRAID PCIe 23xx series cards:
                         *
                         * Unconfigured drives are treated as "Legacy"
                         * by the BIOS, and it overwrites sector 8 with
                         * a "Lgcy" metadata block prior to Linux boot.
                         *
                         * Configured drives (RAID or JBOD) leave sector 8
                         * alone, but instead overwrite a high numbered
                         * sector for the RAID metadata.  This sector can
                         * be determined exactly, by truncating the physical
                         * drive capacity to a nice even GB value.
                         *
                         * RAID metadata is at: (dev->n_sectors & ~0xfffff)
                         *
                         * Warn the user, lest they think we're just buggy.
                         */
                        printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
                                " BIOS CORRUPTS DATA on all attached drives,"
                                " regardless of if/how they are configured."
                                " BEWARE!\n");
                        printk(KERN_WARNING DRV_NAME ": For data safety, do not"
                                " use sectors 8-9 on \"Legacy\" drives,"
                                " and avoid the final two gigabytes on"
                                " all RocketRAID BIOS initialized drives.\n");
                }
        case chip_6042:
                hpriv->ops = &mv6xxx_ops;
                hp_flags |= MV_HP_GEN_IIE;

                switch (pdev->revision) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_XX42A0;
                        break;
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying 60X1C0 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                }
                break;

        default:
                dev_printk(KERN_ERR, &pdev->dev,
                           "BUG: invalid board index %u\n", board_idx);
                return 1;
        }

        hpriv->hp_flags = hp_flags;
        if (hp_flags & MV_HP_PCIE) {
                hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
                hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
                hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
        } else {
                hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
                hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
                hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
        }

        return 0;
}
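/*
 * Worked example of the RAID metadata location noted above: ~0xfffff
 * truncates to a 2^20-sector (512 MiB) boundary.  Hypothetical helper,
 * not used by the driver:
 */
static inline u64 hpt_metadata_sector_example(u64 n_sectors)
{
        /* e.g. a 160 GB disk: 312581808 & ~0xfffff = 312475648 */
        return n_sectors & ~0xfffffULL;
}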
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
        int rc = 0, n_hc, port, hc;
        struct pci_dev *pdev = to_pci_dev(host->dev);
        void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
        struct mv_host_priv *hpriv = host->private_data;

        /* global interrupt mask */
        writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

        rc = mv_chip_id(host, board_idx);
        if (rc)
                goto done;

        n_hc = mv_get_hc_count(host->ports[0]->flags);

        for (port = 0; port < host->n_ports; port++)
                hpriv->ops->read_preamp(hpriv, port, mmio);

        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
        if (rc)
                goto done;

        hpriv->ops->reset_flash(hpriv, mmio);
        hpriv->ops->reset_bus(pdev, mmio);
        hpriv->ops->enable_leds(hpriv, mmio);

        for (port = 0; port < host->n_ports; port++) {
                if (IS_GEN_II(hpriv)) {
                        void __iomem *port_mmio = mv_port_base(mmio, port);

                        u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
                        ifctl |= (1 << 7);              /* enable gen2i speed */
                        ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                        writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
                }

                hpriv->ops->phy_errata(hpriv, mmio, port);
        }

        for (port = 0; port < host->n_ports; port++) {
                struct ata_port *ap = host->ports[port];
                void __iomem *port_mmio = mv_port_base(mmio, port);
                unsigned int offset = port_mmio - mmio;

                mv_port_init(&ap->ioaddr, port_mmio);

                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
                ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
        }

        for (hc = 0; hc < n_hc; hc++) {
                void __iomem *hc_mmio = mv_hc_base(mmio, hc);

                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
                        "(before clear)=0x%08x\n", hc,
                        readl(hc_mmio + HC_CFG_OFS),
                        readl(hc_mmio + HC_IRQ_CAUSE_OFS));

                /* Clear any currently outstanding hc interrupt conditions */
                writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
        }

        /* Clear any currently outstanding host interrupt conditions */
        writelfl(0, mmio + hpriv->irq_cause_ofs);

        /* and unmask interrupt generation for host regs */
        writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

        if (IS_GEN_I(hpriv))
                writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
        else
                writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

        VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                "PCI int cause/mask=0x%08x/0x%08x\n",
                readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
                readl(mmio + HC_MAIN_IRQ_MASK_OFS),
                readl(mmio + hpriv->irq_cause_ofs),
                readl(mmio + hpriv->irq_mask_ofs));

done:
        return rc;
}
/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u8 scc;
        const char *scc_s, *gen;

        /* Use this to determine the HW stepping of the chip so we know
         * what errata to work around
         */
        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
        if (scc == 0)
                scc_s = "SCSI";
        else if (scc == 0x01)
                scc_s = "RAID";
        else
                scc_s = "?";

        if (IS_GEN_I(hpriv))
                gen = "I";
        else if (IS_GEN_II(hpriv))
                gen = "II";
        else if (IS_GEN_IIE(hpriv))
                gen = "IIE";
        else
                gen = "?";

        dev_printk(KERN_INFO, &pdev->dev,
                   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
                   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
                   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
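/*
 * Example of the resulting log line for a hypothetical Gen-IIE board
 * (PCI address, slot count, and IRQ mode vary by system):
 *
 *      sata_mv 0000:02:00.0: Gen-IIE 32 slots 4 ports SCSI mode IRQ via INTx
 */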
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
        hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
                                            MV_CRQB_Q_SZ, 0);
        if (!hpriv->crqb_pool)
                return -ENOMEM;
        hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
                                            MV_CRPB_Q_SZ, 0);
        if (!hpriv->crpb_pool)
                return -ENOMEM;
        hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
                                              MV_SG_TBL_SZ, 0);
        if (!hpriv->sg_tbl_pool)
                return -ENOMEM;
        return 0;
}
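/*
 * The pools above are consumed with the usual dma_pool calls; a
 * minimal sketch (error handling elided, usage illustrative):
 *
 *      dma_addr_t dma;
 *      struct mv_crqb *crqb;
 *
 *      crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &dma);
 *      if (!crqb)
 *              return -ENOMEM;
 *      ...
 *      dma_pool_free(hpriv->crqb_pool, crqb, dma);
 *
 * Since dmam_pool_create() is device-managed, the pools themselves
 * are released automatically on driver detach.
 */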
/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        unsigned int board_idx = (unsigned int)ent->driver_data;
        const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        int n_ports, rc;

        if (!printed_version++)
                dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

        /* allocate host */
        n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!host || !hpriv)
                return -ENOMEM;
        host->private_data = hpriv;

        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);

        rc = pci_go_64(pdev);
        if (rc)
                return rc;

        rc = mv_create_dma_pools(hpriv, &pdev->dev);
        if (rc)
                return rc;

        /* initialize adapter */
        rc = mv_init_host(host, board_idx);
        if (rc)
                return rc;

        /* Enable interrupts */
        if (msi && pci_enable_msi(pdev))
                pci_intx(pdev, 1);

        mv_dump_pci_cfg(pdev, 0x68);
        mv_print_info(host);

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
        return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
                                 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
static int __init mv_init(void)
{
        return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
        pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
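/*
 * Usage sketch: MSI is off by default; enable it at load time with
 * "modprobe sata_mv msi=1".  If pci_enable_msi() fails, mv_init_one()
 * falls back to legacy INTx.
 */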
module_init(mv_init);
module_exit(mv_exit);