 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"

	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
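	/* Worked sizing example (illustrative; assumes the values used by
	 * this driver, MV_MAX_Q_DEPTH == 32 and MV_MAX_SG_CT == 256):
	 *   MV_CRQB_Q_SZ = 32B * 32  = 1KB  (matches the CRQB 1KB alignment)
	 *   MV_CRPB_Q_SZ =  8B * 32  = 256B (matches the CRPB 256B alignment)
	 *   MV_SG_TBL_SZ = 16B * 256 = 4KB of ePRDs per command slot
	 */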
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */

	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
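	/* Worked example of the main-cause bit layout (derived from
	 * mv_host_intr() below): each port owns an err/done pair at
	 * (port * 2), with bit 8 (HC0 coalescing) skipped for ports 4-7.
	 * So port 5 uses shift = 5*2 + 1 = 11: PORT0_ERR << 11 is its
	 * error bit and PORT0_DONE << 11 (bit 12) its done bit, inside
	 * HC1's window selected by HC0_IRQ_PEND << HC_SHIFT.
	 */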
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,
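	/* Example of what this boundary implies (see mv_fill_sg() below):
	 * a 20KB buffer whose bus address ends in 0xf000 crosses a 64KB
	 * boundary and must be emitted as two ePRDs, 0x1000 bytes up to
	 * the boundary plus the remaining 0x4000 bytes.  This is also why
	 * the scsi_host_templates advertise only MV_MAX_SG_CT / 2 entries.
	 */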
	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
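	/* The low bits of these registers are reusable precisely because
	 * of the alignment rules above: a 1KB-aligned CRQB base has bits
	 * 9:0 clear and a 256B-aligned CRPB base has bits 7:0 clear, so
	 * the ring index is packed into those bits (the WARN_ONs in
	 * mv_set_edma_ptrs() enforce the alignment).
	 */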
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

struct mv_port_priv {
	struct mv_crqb		*crqb;
	struct mv_crpb		*crpb;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

struct mv_port_signal {

struct mv_host_priv {
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;

	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;

	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
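/* Concretely: each S/G element handed down by SCSI may split into at most
 * two ePRDs (one on each side of a 64K boundary), so advertising
 * MV_MAX_SG_CT / 2 entries guarantees the worst case still fits in the
 * MV_MAX_SG_CT-entry hardware table.
 */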
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,

	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,

	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
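/* Address-math example using the constants above: port 5 lives on HC1 as
 * hard port 1, so its registers start at MV_SATAHC0_REG_BASE
 * + 1 * MV_SATAHC_REG_SZ + MV_SATAHC_ARBTR_REG_SZ + 1 * MV_PORT_REG_SZ
 * = 0x20000 + 0x10000 + 0x2000 + 0x2000 = 0x34000 from the primary BAR.
 */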
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
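/* The packing used above, spelled out: the in/out pointer registers carry
 * the queue base in their high bits and the ring index in the bits freed
 * by alignment.  For the request queue:
 *
 *	value = (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK)	   (bits 31:10)
 *	      | ((req_idx & MV_MAX_Q_DEPTH_MASK) << 5)	   (bits 9:5)
 *
 * The response queue uses the same scheme with its 3-bit shift.
 */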
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
	}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devs */
		start_hc = port >> MV_PORT_HC_SHIFT;
		num_ports = num_hcs = 1;
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;

	dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
	dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);

	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 */

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_port_free_dma_mem(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

			u32 offset = addr & 0xffff;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
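/* Worked example of the splitting above (assuming the usual advance of
 * addr/sg_len by "len" between ePRDs): addr = 0x1234f000, sg_len = 0x5000.
 * First ePRD: offset = 0xf000, len = 0x10000 - 0xf000 = 0x1000, covering
 * up to the 64K boundary.  Second ePRD: the remaining 0x4000 bytes from
 * 0x12350000, and being last it gets EPRD_FLAG_END_OF_TBL.
 */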
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
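/* Example: mv_crqb_pack_cmd(cw, tf->nsect, ATA_REG_NSECT, 0) packs the
 * sector count as nsect | (ATA_REG_NSECT << CRQB_CMD_ADDR_SHIFT) |
 * CRQB_CMD_CS, i.e. data in bits 7:0, the shadow-register address above
 * it, and no CRQB_CMD_LAST bit since more register words follow.
 */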
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_taskfile *tf;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
	crqb->ata_cmd[1] = cpu_to_le32(
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		pp->resp_idx++;
		work_done = true;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
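/* On 50xx the per-port PHY register blocks sit in 0x100-sized slots just
 * above the HC base: hard port 0 -> hc_mmio + 0x100, ..., hard port 3 ->
 * hc_mmio + 0x400, which is what (hardport + 1) * 0x100 computes.
 */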
1850 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1854 switch (sc_reg_in) {
1858 ofs = sc_reg_in * sizeof(u32);
1867 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1869 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1870 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1871 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1873 if (ofs != 0xffffffffU) {
1874 *val = readl(addr + ofs);
1880 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1882 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1883 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1884 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1886 if (ofs != 0xffffffffU) {
1887 writelfl(val, addr + ofs);
1893 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1897 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1900 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1902 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1905 mv_reset_pci_bus(pdev, mmio);
1908 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1910 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1913 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1916 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1919 tmp = readl(phy_mmio + MV5_PHY_MODE);
1921 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1922 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1925 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1929 writel(0, mmio + MV_GPIO_PORT_CTL);
1931 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1933 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1935 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1938 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1941 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1942 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1944 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1947 tmp = readl(phy_mmio + MV5_LT_MODE);
1949 writel(tmp, phy_mmio + MV5_LT_MODE);
1951 tmp = readl(phy_mmio + MV5_PHY_CTL);
1954 writel(tmp, phy_mmio + MV5_PHY_CTL);
1957 tmp = readl(phy_mmio + MV5_PHY_MODE);
1959 tmp |= hpriv->signal[port].pre;
1960 tmp |= hpriv->signal[port].amps;
1961 writel(tmp, phy_mmio + MV5_PHY_MODE);
1966 #define ZERO(reg) writel(0, port_mmio + (reg))
1967 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1970 void __iomem *port_mmio = mv_port_base(mmio, port);
1972 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1974 mv_channel_reset(hpriv, mmio, port);
1976 ZERO(0x028); /* command */
1977 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1978 ZERO(0x004); /* timer */
1979 ZERO(0x008); /* irq err cause */
1980 ZERO(0x00c); /* irq err mask */
1981 ZERO(0x010); /* rq bah */
1982 ZERO(0x014); /* rq inp */
1983 ZERO(0x018); /* rq outp */
1984 ZERO(0x01c); /* respq bah */
1985 ZERO(0x024); /* respq outp */
1986 ZERO(0x020); /* respq inp */
1987 ZERO(0x02c); /* test control */
1988 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1992 #define ZERO(reg) writel(0, hc_mmio + (reg))
1993 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1996 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2004 tmp = readl(hc_mmio + 0x20);
2007 writel(tmp, hc_mmio + 0x20);
2011 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2014 unsigned int hc, port;
2016 for (hc = 0; hc < n_hc; hc++) {
2017 for (port = 0; port < MV_PORTS_PER_HC; port++)
2018 mv5_reset_hc_port(hpriv, mmio,
2019 (hc * MV_PORTS_PER_HC) + port);
2021 mv5_reset_one_hc(hpriv, mmio, hc);
2028 #define ZERO(reg) writel(0, mmio + (reg))
2029 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
2031 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2032 struct mv_host_priv *hpriv = host->private_data;
2035 tmp = readl(mmio + MV_PCI_MODE);
2037 writel(tmp, mmio + MV_PCI_MODE);
2039 ZERO(MV_PCI_DISC_TIMER);
2040 ZERO(MV_PCI_MSI_TRIGGER);
2041 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2042 ZERO(HC_MAIN_IRQ_MASK_OFS);
2043 ZERO(MV_PCI_SERR_MASK);
2044 ZERO(hpriv->irq_cause_ofs);
2045 ZERO(hpriv->irq_mask_ofs);
2046 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2047 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2048 ZERO(MV_PCI_ERR_ATTRIBUTE);
2049 ZERO(MV_PCI_ERR_COMMAND);
2053 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2057 mv5_reset_flash(hpriv, mmio);
2059 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2061 tmp |= (1 << 5) | (1 << 6);
2062 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2066 * mv6_reset_hc - Perform the 6xxx global soft reset
2067 * @mmio: base address of the HBA
2069 * This routine only applies to 6xxx parts.
2072 * Inherited from caller.
2074 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2077 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2081 /* Following procedure defined in PCI "main command and status
2085 writel(t | STOP_PCI_MASTER, reg);
2087 for (i = 0; i < 1000; i++) {
2090 if (PCI_MASTER_EMPTY & t)
2093 if (!(PCI_MASTER_EMPTY & t)) {
2094 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2102 writel(t | GLOB_SFT_RST, reg);
2105 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2107 if (!(GLOB_SFT_RST & t)) {
2108 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2113 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2116 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2119 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2121 if (GLOB_SFT_RST & t) {
2122 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2129 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2132 void __iomem *port_mmio;
2135 tmp = readl(mmio + MV_RESET_CFG);
2136 if ((tmp & (1 << 0)) == 0) {
2137 hpriv->signal[idx].amps = 0x7 << 8;
2138 hpriv->signal[idx].pre = 0x1 << 5;
2142 port_mmio = mv_port_base(mmio, idx);
2143 tmp = readl(port_mmio + PHY_MODE2);
2145 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2146 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2149 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2151 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2154 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2157 void __iomem *port_mmio = mv_port_base(mmio, port);
2159 u32 hp_flags = hpriv->hp_flags;
2161 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2163 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2166 if (fix_phy_mode2) {
2167 m2 = readl(port_mmio + PHY_MODE2);
2170 writel(m2, port_mmio + PHY_MODE2);
2174 m2 = readl(port_mmio + PHY_MODE2);
2175 m2 &= ~((1 << 16) | (1 << 31));
2176 writel(m2, port_mmio + PHY_MODE2);
2181 /* who knows what this magic does */
2182 tmp = readl(port_mmio + PHY_MODE3);
2185 writel(tmp, port_mmio + PHY_MODE3);
2187 if (fix_phy_mode4) {
2190 m4 = readl(port_mmio + PHY_MODE4);
2192 if (hp_flags & MV_HP_ERRATA_60X1B2)
2193 tmp = readl(port_mmio + 0x310);
2195 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2197 writel(m4, port_mmio + PHY_MODE4);
2199 if (hp_flags & MV_HP_ERRATA_60X1B2)
2200 writel(tmp, port_mmio + 0x310);
2203 /* Revert values of pre-emphasis and signal amps to the saved ones */
2204 m2 = readl(port_mmio + PHY_MODE2);
2206 m2 &= ~MV_M2_PREAMP_MASK;
2207 m2 |= hpriv->signal[port].amps;
2208 m2 |= hpriv->signal[port].pre;
2211 /* according to mvSata 3.6.1, some IIE values are fixed */
2212 if (IS_GEN_IIE(hpriv)) {
2217 writel(m2, port_mmio + PHY_MODE2);
2220 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2221 unsigned int port_no)
2223 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2225 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2227 if (IS_GEN_II(hpriv)) {
2228 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2229 ifctl |= (1 << 7); /* enable gen2i speed */
2230 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2231 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2234 udelay(25); /* allow reset propagation */
2236 /* Spec never mentions clearing the bit. Marvell's driver does
2237 * clear the bit, however.
2239 writelfl(0, port_mmio + EDMA_CMD_OFS);
2241 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2243 if (IS_GEN_I(hpriv))
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: resulting device class, if any
 *      @deadline: deadline jiffies for the operation
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  Originally coded to be safe to call at
 *      interrupt level, but note that it now calls msleep(), so it
 *      must be able to sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		msleep(500);

		if (retry-- <= 0)
			break;

		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
}
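
/**
 *      mv_prereset - Prepare a port for reset
 *      @link: ATA link to prepare
 *      @deadline: deadline jiffies for the operation
 *
 *      Stops EDMA, and schedules a hardreset both on the first EH
 *      pass for the port and whenever EDMA refuses to stop; otherwise
 *      waits for the device to become ready ahead of softreset.
 *
 *      LOCKING:
 *      Inherited from caller.
 */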
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
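
/**
 *      mv_hardreset - Perform a channel hard reset
 *      @link: ATA link to reset
 *      @class: resulting device class
 *      @deadline: deadline jiffies for the operation
 *
 *      Stops EDMA, resets the channel, then runs COMRESET and device
 *      classification via mv_phy_reset().
 *
 *      LOCKING:
 *      Inherited from caller.
 */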
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
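
/**
 *      mv_postreset - Clean up after a reset
 *      @link: ATA link just reset
 *      @classes: resulting device classes
 *
 *      Prints link status, clears SError, and writes the cached
 *      device control value if a device was found.
 *
 *      LOCKING:
 *      Inherited from caller.
 */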
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
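
/**
 *      mv_error_handler - libata error handler entry point
 *      @ap: ATA channel being recovered
 *
 *      Standard new-EH flow: our prereset/hardreset/postreset hooks
 *      wrapped around the generic softreset.
 *
 *      LOCKING:
 *      Inherited from caller.
 */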
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
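
/**
 *      mv_eh_freeze - Mask this port's interrupts
 *      @ap: ATA channel to freeze
 *
 *      Each port owns a two-bit err/done field in the main IRQ mask;
 *      ports behind the second host controller sit one extra bit
 *      higher.  Clearing those bits keeps the port quiet while EH
 *      runs.
 *
 *      LOCKING:
 *      Inherited from caller.
 */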
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
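
/**
 *      mv_eh_thaw - Unmask this port's interrupts
 *      @ap: ATA channel to thaw
 *
 *      Undoes mv_eh_freeze(): clears any EDMA errors and pending
 *      CRPB-done/device interrupts that accumulated while frozen,
 *      then re-enables the port's err/done bits in the main IRQ mask.
 *
 *      LOCKING:
 *      Inherited from caller.
 */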
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
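
/**
 *      mv_chip_id - Identify the chip generation and errata level
 *      @host: ATA host to configure
 *      @board_idx: controller index from the PCI ID table
 *
 *      Selects the Gen-I/II/IIE ops vector, translates the PCI
 *      revision ID into MV_HP_ERRATA_* flags, and records whether
 *      the PCI or PCIe interrupt cause/mask registers apply.
 *
 *      LOCKING:
 *      Inherited from caller.
 */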
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310)) {
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: the 7042 is otherwise handled as a 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the device class code to see whether the chip presents
	 * itself as plain SCSI or as RAID, for the log message below.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
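
/**
 *      mv_create_dma_pools - Allocate per-host DMA pools
 *      @hpriv: host private data to hold the pools
 *      @dev: device to charge the managed allocations to
 *
 *      The command request queue, command response queue and SG
 *      tables each need physically contiguous, aligned memory, so
 *      carve each out of its own dmam_pool.
 *
 *      LOCKING:
 *      Inherited from caller.
 */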
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
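
/* module load/unload: just register and unregister the PCI driver */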
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);