 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.
  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is simply not worth
  the added latency cost.
  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL = 0x1046c,
	MV_GPIO_PORT_CTL = 0x104f0,
	MV_RESET_CFG = 0x180d8,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
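	/* Illustrative arithmetic, added for clarity (not in the original
	 * source): with the queue depth of 32 above, the rings work out to
	 *
	 *	MV_CRQB_Q_SZ = 32 B/entry * 32 entries = 1 KB
	 *	MV_CRPB_Q_SZ =  8 B/entry * 32 entries = 256 B
	 *
	 * i.e. each ring is exactly as large as its required alignment,
	 * which is what lets the dma_pools used later hand out correctly
	 * aligned blocks.
	 */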
	MV_PORTS_PER_HC = 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT = 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK = 3,

	MV_FLAG_DUAL_HC = (1 << 30),		/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
			  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS = 0xc00,

	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS = 0x1900,
	PCIE_IRQ_MASK_OFS = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	PORT0_ERR = (1 << 0),	/* shift by port # */
	PORT0_DONE = (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
			       HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_IRQ_CAUSE_OFS = 0x14,
	CRPB_DMA_DONE = (1 << 0),	/* shift by port # */
	HC_IRQ_COAL = (1 << 4),		/* IRQ coalescing */
	DEV_IRQ = (1 << 8),		/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS = 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	SATA_FIS_IRQ_CAUSE_OFS = 0x364,
	SATA_INTERFACE_CTL = 0x050,

	MV_M2_PREAMP_MASK = 0x7e0,

	EDMA_CFG_Q_DEPTH = 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),		/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |

	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD_OFS = 0x28,		/* EDMA command register */
	EDMA_EN = (1 << 0),		/* enable EDMA */
	EDMA_DS = (1 << 1),		/* disable EDMA; self-negated */
	ATA_RST = (1 << 2),		/* reset trans/link/phy */

	EDMA_IORDY_TMOUT = 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),		/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),		/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),		/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET = (1 << 2),	/* 1st hard reset complete? */

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
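	/* Worked example, added for clarity (not in the original source):
	 * the CRQB ring is 1 KB aligned, so the low 10 bits of crqb_dma
	 * are always zero and EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00) keeps
	 * only the base address; bits 9:5 of the very same register are
	 * then free to carry the 5-bit request queue index (see
	 * mv_set_edma_ptrs() below).  The 256 B aligned response ring
	 * likewise leaves bits 7:3 free for its index.
	 */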
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

struct mv_port_priv {
	struct mv_crqb *crqb;
	struct mv_crpb *crpb;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int req_idx;
	unsigned int resp_idx;

struct mv_port_signal {

struct mv_host_priv {
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;

	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
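/* Worked example, added for clarity (not in the original source): a
 * single 64 KB scatterlist segment starting at offset 0x8000 within a
 * 64 KB region crosses one 64 KB boundary, so mv_fill_sg() must emit
 * two ePRDs of 0x8000 bytes each for it.  In the worst case every
 * segment splits this way, so only MV_MAX_SG_CT / 2 segments may be
 * accepted from the SCSI layer if the MV_MAX_SG_CT hardware entries
 * are never to be overrun.
 */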
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,

static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,

static const struct ata_port_info mv_port_info[] = {
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,

		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */

static struct pci_driver mv_pci_driver = {
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,

static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev, "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev, "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return rc;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));

static inline unsigned int mv_hc_from_port(unsigned int port)
	return port >> MV_PORT_HC_SHIFT;

static inline unsigned int mv_hardport_from_port(unsigned int port)
	return port & MV_PORT_MASK;

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
	return mv_hc_base(base, mv_hc_from_port(port));

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);

static inline void __iomem *mv_ap_base(struct ata_port *ap)
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);

static inline int mv_get_hc_count(unsigned long port_flags)
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
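/* Worked example, added for clarity (not in the original source): for
 * port 6 on an 8-port (dual HC) chip the helpers above give
 *
 *	mv_hc_from_port(6)	 = 6 >> 2 = 1	(second HC)
 *	mv_hardport_from_port(6) = 6 & 3  = 2	(third port on that HC)
 *	mv_port_base(base, 6)	 = base + 0x20000	(SATAHC0 base)
 *				 + 1 * 0x10000		(HC1)
 *				 + 0x2000		(arbiter)
 *				 + 2 * 0x2000		(hard port 2)
 *				 = base + 0x36000
 */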
static void mv_irq_clear(struct ata_port *ap)

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
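/* Numeric example, added for clarity (not in the original source):
 * with req_idx == 7 and a CRQB ring at (1 KB aligned) DMA address
 * 0x1f00400, mv_set_edma_ptrs() writes
 *
 *	(0x1f00400 & 0xfffffc00) | (7 << 5) == 0x1f004e0
 *
 * to EDMA_REQ_Q_IN_PTR_OFS: base address and producer index share a
 * single 32-bit register.
 */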
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;
		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
static int mv_stop_dma(struct ata_port *ap)
	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

static void mv_dump_mem(void __iomem *start, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	start_hc = start_port = 0;
	num_ports = 8;		/* should be benign for 4 port devs */
	start_hc = port >> MV_PORT_HC_SHIFT;
	num_ports = num_hcs = 1;

	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);

	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
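/* Example mapping, added for clarity (not in the original source):
 * mv_scr_offset() turns the generic SCR index into a per-port register
 * offset; SCR_STATUS (0), SCR_ERROR (1) and SCR_CONTROL (2) land at
 * 0x300, 0x304 and 0x308 via SATA_STATUS_OFS + index * 4, while
 * SCR_ACTIVE is special-cased to the non-contiguous SATA_ACTIVE_OFS
 * (0x350).  Any other index yields the 0xffffffffU sentinel, which the
 * two wrappers above treat as "no such register".
 */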
static void mv6_dev_config(struct ata_device *adev)
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
static void mv_port_free_dma_mem(struct ata_port *ap)
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;

	dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);

	dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);

	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow registers.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
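/* Worked example of the boundary split above, added for clarity (not
 * in the original source): an sg entry with addr == 0x12fff0 and
 * sg_len == 0x40 has offset == 0xfff0; since 0xfff0 + 0x40 crosses
 * 0x10000, the first ePRD carries len == 0x10000 - 0xfff0 == 0x10
 * bytes and the loop then emits a second ePRD at address 0x130000 for
 * the remaining 0x30 bytes.
 */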
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
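/* Layout of the 16-bit word built above, added for clarity (not in the
 * original source):
 *
 *	bits 7:0	register data (e.g. tf->lbal)
 *	bits 11:8	shadow register address (ATA_REG_* index)
 *	bit 12		CRQB_CMD_CS
 *	bit 15		CRQB_CMD_LAST, set only on the final word
 *
 * e.g. mv_crqb_pack_cmd(cw, 0x25, ATA_REG_CMD, 1), a READ DMA EXT
 * opcode sent to command register 7, packs to 0x9725.
 */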
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_taskfile *tf;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}

	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			      >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger from reset here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
static void mv_intr_pio(struct ata_port *ap)
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
static void mv_intr_edma(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		    >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
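/* Worked example of the shift logic above, added for clarity (not in
 * the original source): port 5 sits on HC1, so shift = 5 << 1 = 10,
 * then shift++ to step over PORTS_0_3_COAL_DONE at bit 8, giving
 * PORT0_ERR << 11 (bit 11) as its error bit and PORT0_DONE << 11
 * (bit 12) as its done bit in the main cause register; this matches
 * HC_SHIFT == 9 cause bits per host controller.
 */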
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}

/**
 * mv_interrupt - Main interrupt event handler
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {
		ofs = sc_reg_in * sizeof(u32);

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
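/* Example of the 5xxx PHY layout above, added for clarity (not in the
 * original source): mv5_phy_base() places each port's bridge registers
 * at (hardport + 1) * 0x100 past its HC base, so port 2 (HC0, hard
 * port 2) gets its PHY/SCR block at hc_base + 0x300; mv5_scr_offset()
 * then appears to index SCR_STATUS/SCR_ERROR/SCR_CONTROL at
 * +0x0/+0x4/+0x8 within that block.
 */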
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);

	mv_reset_pci_bus(pdev, mmio);

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	tmp = readl(phy_mmio + MV5_LT_MODE);
	writel(tmp, phy_mmio + MV5_LT_MODE);

	tmp = readl(phy_mmio + MV5_PHY_CTL);
	writel(tmp, phy_mmio + MV5_PHY_CTL);

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);

	tmp = readl(hc_mmio + 0x20);
	writel(tmp, hc_mmio + 0x20);
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;

#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;

	tmp = readl(mmio + MV_PCI_MODE);
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;

	/* Following procedure defined in PCI "main command and status
	 */
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");

	do {
		writel(t | GLOB_SFT_RST, reg);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
	void __iomem *port_mmio;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		writel(m2, port_mmio + PHY_MODE2);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {

	writel(m2, port_mmio + PHY_MODE2);

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000;	/* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);

/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
2258 * Inherited from caller. This is coded to safe to call at
2259 * interrupt level, i.e. it does not sleep.
2261 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2262 unsigned long deadline)
2264 struct mv_port_priv *pp = ap->private_data;
2265 struct mv_host_priv *hpriv = ap->host->private_data;
2266 void __iomem *port_mmio = mv_ap_base(ap);
2270 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2274 u32 sstatus, serror, scontrol;
2276 mv_scr_read(ap, SCR_STATUS, &sstatus);
2277 mv_scr_read(ap, SCR_ERROR, &serror);
2278 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2279 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2280 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2284 /* Issue COMRESET via SControl */
2286 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2289 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2293 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2294 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2298 } while (time_before(jiffies, deadline));
2300 /* work around errata */
2301 if (IS_GEN_II(hpriv) &&
2302 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2304 goto comreset_retry;
2308 u32 sstatus, serror, scontrol;
2310 mv_scr_read(ap, SCR_STATUS, &sstatus);
2311 mv_scr_read(ap, SCR_ERROR, &serror);
2312 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2313 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2314 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2318 if (ata_link_offline(&ap->link)) {
2319 *class = ATA_DEV_NONE;
2323 /* even after SStatus reflects that device is ready,
2324 * it seems to take a while for link to be fully
2325 * established (and thus Status no longer 0x80/0x7F),
2326 * so we poll a bit for that, here.
2330 u8 drv_stat = ata_check_status(ap);
2331 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2336 if (time_after(jiffies, deadline))
2340 /* FIXME: if we passed the deadline, the following
2341 * code probably produces an invalid result
2344 /* finally, read device signature from TF registers */
2345 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2347 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2349 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
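/*
 * The two SControl writes above drive the DET field (bits 3:0):
 * 0x301 sets DET = 1 ("perform interface communication
 * initialization", i.e. assert COMRESET) with IPM = 3 (partial and
 * slumber power states disabled), and 0x300 then returns DET to 0 so
 * the PHY may renegotiate.  The SStatus poll that follows waits for
 * DET to settle at 3 (device present, PHY communication established)
 * or 0 (nothing attached).
 */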
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp	= ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
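/*
 * SError is write-1-to-clear, so reading it and writing the same
 * value back (as done above) acknowledges exactly the error bits
 * that were set.  Minimal sketch of the idiom, assuming only a
 * generic libata link (illustrative, not used by the driver):
 */
static __maybe_unused void mv_clear_serror_sketch(struct ata_link *link)
{
	u32 serr;

	/* read current error bits, then write them back to clear them */
	if (sata_scr_read(link, SCR_ERROR, &serr) == 0)
		sata_scr_write_flush(link, SCR_ERROR, serr);
}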
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
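/*
 * Worked example of the shift/mask computation shared by
 * mv_eh_freeze() and mv_eh_thaw(): for ap->port_no == 5 the port
 * lives on the second host controller (hc == 1), so shift ==
 * 5 * 2 + 1 == 11 and mask == 0x3 << 11 covers that port's err and
 * done bits in HC_MAIN_IRQ_MASK_OFS, while hc_port_no == 5 - 4 == 1
 * selects its CRPB-done bit (bit 1) and device-interrupt bit (bit 9)
 * within the per-HC cause register.
 */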
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
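/*
 * The shadow block lays the classic ATA taskfile registers out as
 * consecutive 32-bit words, which is why each address above is
 * shd_base + sizeof(u32) * ATA_REG_xxx.  Sketch of the resulting
 * layout (offsets relative to shd_base, assuming the standard
 * ATA_REG_* numbering):
 *
 *	0x00 data	0x04 error/feature	0x08 nsect
 *	0x0c lbal	0x10 lbam		0x14 lbah
 *	0x18 device	0x1c status/command
 */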
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
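/*
 * Worked example of the RAID metadata location noted above
 * (illustrative drive size): a 160 GB drive reporting n_sectors ==
 * 312581808 (0x12a19eb0) gives 0x12a19eb0 & ~0xfffff == 0x12a00000,
 * i.e. the BIOS metadata sits at sector 312475648 -- the capacity
 * rounded down to a 512 MiB multiple -- hence the advice to avoid
 * the final gigabytes of such drives.
 */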
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
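/*
 * Note the bracketing in the routine above: it masks all main-cause
 * interrupt sources first (the write of 0 to HC_MAIN_IRQ_MASK_OFS at
 * entry), performs chip and port bring-up while nothing can fire,
 * clears every stale cause register, and only then opens the mask
 * registers.  Any interrupt delivered earlier would be for an event
 * the driver has not yet prepared for.
 */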
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI subclass byte, to report whether the chip
	 * presents itself in SCSI or RAID mode.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
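/*
 * PCI_CLASS_DEVICE names the 16-bit class word in config space, so
 * the single-byte read above picks up only its low (subclass) byte:
 * 0x00 is the SCSI controller subclass, which is why scc == 0 is
 * reported as "SCSI" mode, while the driver labels its other
 * recognized personality, scc == 0x01, as "RAID".
 */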
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
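/*
 * The pools above hand out fixed-size, alignment-guaranteed DMA
 * buffers for the command request/response queues and SG tables;
 * buffers come back via dma_pool_free().  A minimal usage sketch,
 * illustrative only -- the driver really keeps these pointers in its
 * per-port private data:
 */
static __maybe_unused void *mv_pool_usage_sketch(struct mv_host_priv *hpriv,
						 dma_addr_t *dma)
{
	/* one CRQB queue: physically contiguous, MV_CRQB_Q_SZ-aligned */
	return dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, dma);
}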
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
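/*
 * Error handling in mv_init_one() leans entirely on managed
 * resources: the devm_/pcim_/dmam_ allocations above are released
 * automatically when probe fails or the device is unbound, which is
 * why every failure path is a bare "return rc" with no unwind
 * ladder.
 */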
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);