/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
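	/*
	 * Sanity check on the sizing above (added illustration, not in the
	 * original source): 32 CRQBs x 32B = 1KB, matching the 1KB alignment
	 * the CRQB queue requires, and 32 CRPBs x 8B = 256B, matching the
	 * CRPB queue's 256B alignment.
	 */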
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,
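	/*
	 * Worked example (added for illustration): port 6 lives on
	 * HC = 6 >> MV_PORT_HC_SHIFT = 1, as hard port = 6 & MV_PORT_MASK = 2.
	 */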
	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
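	/*
	 * Added illustration of the layout above: each port contributes an
	 * ERR bit at (port * 2) and a DONE bit at (port * 2 + 1) within its
	 * HC's 9-bit field; HC1's ports are shifted up by one extra bit to
	 * skip bit 8 (PORTS_0_3_COAL_DONE), exactly as the shift logic in
	 * mv_host_intr() computes.
	 */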
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6), /* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
	/* SATAHC registers */

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				  /* temporary, until we fix hotplug: */
				  (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
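/*
 * Note (added): the BASE_LO masks follow from the alignment rules above.
 * A 1KB-aligned request queue address has its low 10 bits clear
 * (0xfffffc00) and a 256B-aligned response queue address has its low
 * 8 bits clear (0xffffff00), which leaves the low bits of the IN/OUT
 * pointer registers free to carry the queue index (see mv_set_edma_ptrs).
 */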
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= sata_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
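/*
 * Worked example (added for illustration): for port 5, hc = 5 >> 2 = 1
 * and hardport = 5 & 3 = 1, so mv_port_base() yields mmio + 0x20000
 * (MV_SATAHC0_REG_BASE) + 1 * 0x10000 (MV_SATAHC_REG_SZ) + 0x2000
 * (arbiter) + 1 * 0x2000 (MV_PORT_REG_SZ) = mmio + 0x34000.
 */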
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
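/*
 * Note (added): the "(dma >> 16) >> 16" idiom above extracts the upper
 * 32 bits of a DMA address without invoking undefined behavior when
 * dma_addr_t is only 32 bits wide (a single ">> 32" would be an
 * illegal shift there); on 32-bit dma_addr_t the result is simply 0.
 */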
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off (up to ~100ms). */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap))
			adev->flags &= ~ATA_DFLAG_NCQ;
		else if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
	}
}
static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
	/*
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	 */
	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
	if (enable_fbs) {
		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode |  LTMODE_BIT8;
	} else { /* disable fbs */
		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	}
	if (new_fcfg != old_fcfg)
		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
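/*
 * Worked example of the 64K split above (added for illustration):
 * a 0x2000-byte segment at bus address 0x1f000 crosses a 64K boundary
 * (offset = 0xf000, offset + len > 0x10000), so it is emitted as two
 * ePRDs: 0x1000 bytes at 0x1f000 and 0x1000 bytes at 0x20000.  This
 * worst-case doubling is why .sg_tablesize is only MV_MAX_SG_CT / 2.
 */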
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
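/*
 * Layout of the resulting 16-bit CRQB command word (added illustration,
 * derived from the CRQB_CMD_* constants above): bits 7:0 carry the data
 * byte, bits starting at CRQB_CMD_ADDR_SHIFT (8) carry the shadow
 * register address, CRQB_CMD_CS places 0x2 at bits 12:11, and bit 15
 * (CRQB_CMD_LAST) marks the final word of the register sequence.
 */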
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
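/*
 * Note (added): software only plays the producer role here; it advances
 * pp->req_idx and publishes it via the IN pointer register above, and
 * the EDMA engine then fetches and consumes CRQBs on its own, reporting
 * completions through the response (CRPB) queue.
 */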
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @reset_allowed: bool: 0 == don't trigger from reset here
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which also performs a COMRESET.
 *      The SERR case requires a clear of pending errors in the SATA
 *      SERROR register.  Finally, if the port disabled DMA,
 *      update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

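/*
 * SoC reset helpers.  These mirror the mv5/mv6 versions above, but the
 * integrated (chip_soc) controllers hang off a single host controller
 * block and have no PCI bridge, so reset_flash and reset_bus further
 * down reduce to no-ops.
 */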
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}

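/*
 * Worked example of the ifctl arithmetic above (values illustrative):
 * if SATA_INTERFACE_CFG reads back as 0x00000e81, then
 * (0xe81 & 0xf7f) = 0xe01, OR'ing in 0x9b1000 gives 0x009b1e01, and
 * want_gen2i sets bit 7 for a final value of 0x009b1e81.  The bits
 * above bit 11 come straight from the chip spec; bit 7 alone selects
 * gen2i (3.0 Gb/s) signalling.
 */
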
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

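/*
 * The low nibble of SATA_IFCTL_OFS selects which port-multiplier (PMP)
 * device the transport layer talks to, so both reset paths below first
 * steer the link at the right PMP target before issuing the reset.
 */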
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}

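/*
 * On the SStatus checks above: DET lives in bits 3:0, SPD in 7:4 and
 * IPM in 11:8, so 0x0 means no device, 0x113/0x123 mean an established
 * Gen1/Gen2 link, and 0x121 (device detected, no phy communication) is
 * the stuck state the FEr SATA#10 retry loop is working around.
 *
 * For the freeze/thaw hooks below, each port owns an (err, done) bit
 * pair in the main IRQ mask; the extra shift on the second host
 * controller skips the PORTS_0_3 coalescing bit.  For example, port 5
 * gives hc = 1, shift = 5 * 2 + 1 = 11, mask = 0x3 << 11 = 0x1800.
 */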
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	  /* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

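/*
 * Map the board index from the PCI ID table (or chip_soc) onto a
 * hardware-ops vector plus the per-revision errata flags that the rest
 * of the driver keys off.  Unknown revisions get the newest known
 * workarounds and a warning, on the assumption that later steppings
 * need at most what their predecessors needed.
 */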
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}

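/*
 * Three small DMA pools, one per EDMA ring type: command request blocks
 * (CRQB), command response blocks (CRPB) and scatter/gather tables.
 * The dmam_* managed variants tie the pools to the device's lifetime,
 * so there is no matching destroy path on the error branches below or
 * in the remove handlers.
 */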
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
							     MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
							     MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
							     MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

/**
 *      mv_platform_probe - handle a positive probe of an SoC Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/*
 *      mv_platform_remove - unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged.  Perform the needed
 *      cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
			   .name = DRV_NAME,
			   .owner = THIS_MODULE,
			  },
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
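/*
 * Try the widest DMA masks first and fall back in steps: a 64-bit
 * streaming mask with (ideally) 64-bit consistent allocations, else
 * 32-bit consistent; if even the 64-bit streaming mask is refused,
 * fall all the way back to 32-bit for both.
 */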
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class byte to report whether the chip
	 * presents itself as a SCSI or RAID class device
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

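/*
 * Register the PCI driver first, then the platform driver; if platform
 * registration fails, unregister the PCI driver again so module load
 * is all-or-nothing.  The PCI half compiles away when CONFIG_PCI is
 * not set.
 */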
static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);