/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion to other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, /* 0 = INT, 1 = SMI */

        /* for PCI config register 20 */
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offsets */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 interrupt bits */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ enable bits */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments, the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64-bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};

struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_prd  *prd;    /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};

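/* test the per-port ADMA interrupt bit in the ADMA general control register */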
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type {
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = NV_ADMA_MAX_CPBS,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_swncq_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = ATA_MAX_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
        .host_stop              = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = nv_adma_tf_read,
        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = nv_adma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,
        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .irq_handler    = nv_generic_interrupt,
        },
        /* nforce2/3 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .irq_handler    = nv_nf2_interrupt,
        },
        /* ck804 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .irq_handler    = nv_ck804_interrupt,
        },
        /* ADMA */
        {
                .sht            = &nv_adma_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .irq_handler    = nv_adma_interrupt,
        },
        /* SWNCQ */
        {
                .sht            = &nv_swncq_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .irq_handler    = nv_swncq_interrupt,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

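/* driver option defaults: ADMA is enabled, software NCQ is disabled */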
static int adma_enabled = 1;
static int swncq_enabled;

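/*
 * Switch a port out of ADMA mode and back to legacy register mode:
 * wait for the controller to go idle, clear the GO bit, then wait
 * for the LEGACY status bit to assert.
 */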
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                         "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                         status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

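/*
 * Switch a port from register mode into ADMA mode: set the GO bit and
 * wait for the LEGACY bit to clear and the IDLE bit to assert.
 */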
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

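/*
 * Per-device DMA setup. ATAPI devices must use the legacy interface,
 * which only supports 32-bit DMA, so restrict their bounce limit,
 * segment boundary and SG table size accordingly; ATA devices get the
 * full ADMA limits. The ADMA enable bits in PCI config space are
 * updated to match.
 */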
static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Other than when internal or pass-through commands are executed,
           the only time this function will be called in ADMA mode will be
           if a command fails. In the failure case we don't care about going
           into register mode with ADMA commands pending, as the commands will
           all shortly be aborted anyway. We assume that NCQ commands are not
           issued via passthrough, which is the only way that switching into
           ADMA mode could abort outstanding commands. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}

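/*
 * Encode a taskfile as CPB register-update entries: each 16-bit entry
 * carries a register number in the high byte and the value in the low
 * byte. Returns the number of entries written.
 */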
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}

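/*
 * Examine one CPB's response flags: on success complete the qc, on
 * error record EH info and abort or freeze the port. Returns nonzero
 * if error handling was invoked.
 */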
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (likely(flags & NV_CPB_RESP_DONE)) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        DPRINTK("Completing qc from tag %d\n", cpb_num);
                        ata_qc_complete(qc);
                } else {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        /* Notifier bits set without a command may indicate the drive
                           is misbehaving. Raise host state machine violation on this
                           condition. */
                        ata_port_printk(ap, KERN_ERR,
                                        "notifier for tag %d with no cmd?\n",
                                        cpb_num);
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_SOFTRESET;
                        ata_port_freeze(ap);
                        return 1;
                }
        }
        return 0;
}

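/*
 * Legacy-mode interrupt handling for one port: freeze on hotplug
 * events, then hand any device interrupt to libata's standard handler.
 */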
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}

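/*
 * Main ADMA interrupt handler: services ports in legacy/register mode
 * via nv_host_intr(), then reads the ADMA notifier and status
 * registers, freezes the port on controller errors, and completes any
 * finished CPBs.
 */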
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if ADMA is disabled, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        /* if in ATA register mode, check for standard interrupts */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /* NV_INT_DEV indication seems unreliable at times,
                                           at least in ADMA mode. Force it on always when a
                                           command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR |
                                      NV_ADMA_STAT_CMD_COMPLETE)) {
                                u32 check_commands = notifier_clears[i];
                                int pos, error = 0;

                                if (status & NV_ADMA_STAT_CPBERR) {
                                        /* Check all active commands */
                                        if (ata_tag_valid(ap->link.active_tag))
                                                check_commands = 1 << ap->link.active_tag;
                                        else
                                                check_commands = ap->link.sactive;
                                }

                                /* Check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

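/*
 * Allocate and initialize per-port ADMA state: the CPB array and the
 * external APRD tables in one coherent DMA block, plus pointers to the
 * port's MMIO control, general and notifier-clear registers.
 */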
static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        u16 tmp;

        VPRINTK("ENTER\n");

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif

1276 static void nv_adma_setup_port(struct ata_port *ap)
1277 {
1278         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1279         struct ata_ioports *ioport = &ap->ioaddr;
1280
1281         VPRINTK("ENTER\n");
1282
1283         mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1284
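        /*
         * In ADMA space the legacy taskfile registers sit on a 4-byte
         * stride, hence the "* 4" scaling below; the data register, for
         * example, lives at mmio + ATA_REG_DATA * 4.
         */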
1285         ioport->cmd_addr        = mmio;
1286         ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
1287         ioport->error_addr      =
1288         ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
1289         ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
1290         ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
1291         ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
1292         ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
1293         ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
1294         ioport->status_addr     =
1295         ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
1296         ioport->altstatus_addr  =
1297         ioport->ctl_addr        = mmio + 0x20;
1298 }
1299
1300 static int nv_adma_host_init(struct ata_host *host)
1301 {
1302         struct pci_dev *pdev = to_pci_dev(host->dev);
1303         unsigned int i;
1304         u32 tmp32;
1305
1306         VPRINTK("ENTER\n");
1307
1308         /* enable ADMA on the ports */
1309         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1310         tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1311                  NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1312                  NV_MCP_SATA_CFG_20_PORT1_EN |
1313                  NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1314
1315         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1316
1317         for (i = 0; i < host->n_ports; i++)
1318                 nv_adma_setup_port(host->ports[i]);
1319
1320         return 0;
1321 }
1322
1323 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1324                               struct scatterlist *sg,
1325                               int idx,
1326                               struct nv_adma_prd *aprd)
1327 {
1328         u8 flags = 0;
1329         if (qc->tf.flags & ATA_TFLAG_WRITE)
1330                 flags |= NV_APRD_WRITE;
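        /*
         * END marks the last APRD of the command; CONT says the next APRD
         * follows contiguously in memory.  Index 4 is the last of the five
         * inline APRDs in the CPB, so it gets neither -- presumably the
         * controller continues through the CPB's next_aprd pointer instead.
         */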
1331         if (idx == qc->n_elem - 1)
1332                 flags |= NV_APRD_END;
1333         else if (idx != 4)
1334                 flags |= NV_APRD_CONT;
1335
1336         aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1337         aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1338         aprd->flags = flags;
1339         aprd->packet_len = 0;
1340 }
1341
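/*
 * A CPB embeds five APRDs inline; longer scatter/gather lists spill into
 * the per-tag slice of the external APRD table set up in port_start.  As a
 * sketch, a 7-element sg list ends up laid out as:
 *
 *   cpb->aprd[0..4]                           entries 0-4 (inline)
 *   pp->aprd[NV_ADMA_SGTBL_LEN * tag + 0..1]  entries 5-6 (external)
 *   cpb->next_aprd = aprd_dma + NV_ADMA_SGTBL_SZ * tag
 */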
1342 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1343 {
1344         struct nv_adma_port_priv *pp = qc->ap->private_data;
1345         struct nv_adma_prd *aprd;
1346         struct scatterlist *sg;
1347         unsigned int si;
1348
1349         VPRINTK("ENTER\n");
1350
1351         for_each_sg(qc->sg, sg, qc->n_elem, si) {
1352                 aprd = (si < 5) ? &cpb->aprd[si] :
1353                                &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1354                 nv_adma_fill_aprd(qc, sg, si, aprd);
1355         }
1356         if (si > 5)
1357                 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1358         else
1359                 cpb->next_aprd = cpu_to_le64(0);
1360 }
1361
1362 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1363 {
1364         struct nv_adma_port_priv *pp = qc->ap->private_data;
1365
1366         /* ADMA engine can only be used for non-ATAPI DMA commands,
1367            or interrupt-driven no-data commands. */
1368         if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1369            (qc->tf.flags & ATA_TFLAG_POLLING))
1370                 return 1;
1371
1372         if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1373            (qc->tf.protocol == ATA_PROT_NODATA))
1374                 return 0;
1375
1376         return 1;
1377 }
1378
1379 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1380 {
1381         struct nv_adma_port_priv *pp = qc->ap->private_data;
1382         struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1383         u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1384                        NV_CPB_CTL_IEN;
1385
1386         if (nv_adma_use_reg_mode(qc)) {
1387                 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1388                         (qc->flags & ATA_QCFLAG_DMAMAP));
1389                 nv_adma_register_mode(qc->ap);
1390                 ata_qc_prep(qc);
1391                 return;
1392         }
1393
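        /*
         * Invalidate the CPB before rebuilding it: mark the response done
         * and clear the control flags, with barriers in between, so the
         * hardware can never observe a half-built CPB as valid.  CPB_VALID
         * is set again only at the bottom, once every field is filled in.
         */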
1394         cpb->resp_flags = NV_CPB_RESP_DONE;
1395         wmb();
1396         cpb->ctl_flags = 0;
1397         wmb();
1398
1399         cpb->len                = 3;
1400         cpb->tag                = qc->tag;
1401         cpb->next_cpb_idx       = 0;
1402
1403         /* turn on NCQ flags for NCQ commands */
1404         if (qc->tf.protocol == ATA_PROT_NCQ)
1405                 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1406
1407         VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1408
1409         nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1410
1411         if (qc->flags & ATA_QCFLAG_DMAMAP) {
1412                 nv_adma_fill_sg(qc, cpb);
1413                 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1414         } else
1415                 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1416
1417         /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1418            until we are finished filling in all of the contents */
1419         wmb();
1420         cpb->ctl_flags = ctl_flags;
1421         wmb();
1422         cpb->resp_flags = 0;
1423 }
1424
1425 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1426 {
1427         struct nv_adma_port_priv *pp = qc->ap->private_data;
1428         void __iomem *mmio = pp->ctl_block;
1429         int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1430
1431         VPRINTK("ENTER\n");
1432
1433         /* We can't handle result taskfile with NCQ commands, since
1434            retrieving the taskfile switches us out of ADMA mode and would abort
1435            existing commands. */
1436         if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1437                      (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1438                 ata_dev_printk(qc->dev, KERN_ERR,
1439                         "NCQ w/ RESULT_TF not allowed\n");
1440                 return AC_ERR_SYSTEM;
1441         }
1442
1443         if (nv_adma_use_reg_mode(qc)) {
1444                 /* use ATA register mode */
1445                 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1446                 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1447                         (qc->flags & ATA_QCFLAG_DMAMAP));
1448                 nv_adma_register_mode(qc->ap);
1449                 return ata_qc_issue_prot(qc);
1450         } else
1451                 nv_adma_mode(qc->ap);
1452
1453         /* write append register, command tag in lower 8 bits
1454            and (number of CPBs to append - 1) in top 8 bits */
1455         wmb();
1456
1457         if (curr_ncq != pp->last_issue_ncq) {
1458                 /* Seems to need some delay before switching between NCQ and
1459                    non-NCQ commands, else we get command timeouts and such. */
1460                 udelay(20);
1461                 pp->last_issue_ncq = curr_ncq;
1462         }
1463
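        /*
         * Only one CPB is appended at a time here, so the "count - 1" high
         * byte is zero and writing the bare tag suffices: appending tag 5,
         * say, is in effect writew(0x0005, mmio + NV_ADMA_APPEND).
         */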
1464         writew(qc->tag, mmio + NV_ADMA_APPEND);
1465
1466         DPRINTK("Issued tag %u\n", qc->tag);
1467
1468         return 0;
1469 }
1470
1471 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1472 {
1473         struct ata_host *host = dev_instance;
1474         unsigned int i;
1475         unsigned int handled = 0;
1476         unsigned long flags;
1477
1478         spin_lock_irqsave(&host->lock, flags);
1479
1480         for (i = 0; i < host->n_ports; i++) {
1481                 struct ata_port *ap;
1482
1483                 ap = host->ports[i];
1484                 if (ap &&
1485                     !(ap->flags & ATA_FLAG_DISABLED)) {
1486                         struct ata_queued_cmd *qc;
1487
1488                         qc = ata_qc_from_tag(ap, ap->link.active_tag);
1489                         if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1490                                 handled += ata_host_intr(ap, qc);
1491                         else
1492                                 // No qc pending?  Read the status register
1493                                 // anyway, to ack any stray device interrupt.
1494                                 ap->ops->check_status(ap);
1495                 }
1496
1497         }
1498
1499         spin_unlock_irqrestore(&host->lock, flags);
1500
1501         return IRQ_RETVAL(handled);
1502 }
1503
1504 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1505 {
1506         int i, handled = 0;
1507
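        /*
         * Each port owns a 4-bit nibble of irq_stat (NV_INT_PORT_SHIFT is
         * 4), which is shifted down as the ports are walked.
         */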
1508         for (i = 0; i < host->n_ports; i++) {
1509                 struct ata_port *ap = host->ports[i];
1510
1511                 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1512                         handled += nv_host_intr(ap, irq_stat);
1513
1514                 irq_stat >>= NV_INT_PORT_SHIFT;
1515         }
1516
1517         return IRQ_RETVAL(handled);
1518 }
1519
1520 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1521 {
1522         struct ata_host *host = dev_instance;
1523         u8 irq_stat;
1524         irqreturn_t ret;
1525
1526         spin_lock(&host->lock);
1527         irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1528         ret = nv_do_interrupt(host, irq_stat);
1529         spin_unlock(&host->lock);
1530
1531         return ret;
1532 }
1533
1534 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1535 {
1536         struct ata_host *host = dev_instance;
1537         u8 irq_stat;
1538         irqreturn_t ret;
1539
1540         spin_lock(&host->lock);
1541         irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1542         ret = nv_do_interrupt(host, irq_stat);
1543         spin_unlock(&host->lock);
1544
1545         return ret;
1546 }
1547
1548 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1549 {
1550         if (sc_reg > SCR_CONTROL)
1551                 return -EINVAL;
1552
1553         *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1554         return 0;
1555 }
1556
1557 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1558 {
1559         if (sc_reg > SCR_CONTROL)
1560                 return -EINVAL;
1561
1562         iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1563         return 0;
1564 }
1565
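/*
 * freeze/thaw on nForce2 work on the per-port nibble of the shared
 * INT_ENABLE register: freeze clears all four of the port's enable bits,
 * while thaw first acks any pending status for the port and then re-enables
 * only the NV_INT_MASK bits (device, added, removed -- NV_INT_PM stays off).
 */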
1566 static void nv_nf2_freeze(struct ata_port *ap)
1567 {
1568         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1569         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1570         u8 mask;
1571
1572         mask = ioread8(scr_addr + NV_INT_ENABLE);
1573         mask &= ~(NV_INT_ALL << shift);
1574         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1575 }
1576
1577 static void nv_nf2_thaw(struct ata_port *ap)
1578 {
1579         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1580         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1581         u8 mask;
1582
1583         iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1584
1585         mask = ioread8(scr_addr + NV_INT_ENABLE);
1586         mask |= (NV_INT_MASK << shift);
1587         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1588 }
1589
1590 static void nv_ck804_freeze(struct ata_port *ap)
1591 {
1592         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1593         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1594         u8 mask;
1595
1596         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1597         mask &= ~(NV_INT_ALL << shift);
1598         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1599 }
1600
1601 static void nv_ck804_thaw(struct ata_port *ap)
1602 {
1603         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1604         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1605         u8 mask;
1606
1607         writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1608
1609         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1610         mask |= (NV_INT_MASK << shift);
1611         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1612 }
1613
1614 static void nv_mcp55_freeze(struct ata_port *ap)
1615 {
1616         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1617         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1618         u32 mask;
1619
1620         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1621
1622         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1623         mask &= ~(NV_INT_ALL_MCP55 << shift);
1624         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1625         ata_bmdma_freeze(ap);
1626 }
1627
1628 static void nv_mcp55_thaw(struct ata_port *ap)
1629 {
1630         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1631         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1632         u32 mask;
1633
1634         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1635
1636         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1637         mask |= (NV_INT_MASK_MCP55 << shift);
1638         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1639         ata_bmdma_thaw(ap);
1640 }
1641
1642 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1643                         unsigned long deadline)
1644 {
1645         unsigned int dummy;
1646
1647         /* SATA hardreset fails to retrieve proper device signature on
1648          * some controllers.  Don't classify on hardreset.  For more
1649          * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1650          */
1651         return sata_std_hardreset(link, &dummy, deadline);
1652 }
1653
1654 static void nv_error_handler(struct ata_port *ap)
1655 {
1656         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1657                            nv_hardreset, ata_std_postreset);
1658 }
1659
1660 static void nv_adma_error_handler(struct ata_port *ap)
1661 {
1662         struct nv_adma_port_priv *pp = ap->private_data;
1663         if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1664                 void __iomem *mmio = pp->ctl_block;
1665                 int i;
1666                 u16 tmp;
1667
1668                 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1669                         u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1670                         u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1671                         u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1672                         u32 status = readw(mmio + NV_ADMA_STAT);
1673                         u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1674                         u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1675
1676                         ata_port_printk(ap, KERN_ERR,
1677                                 "EH in ADMA mode, notifier 0x%X "
1678                                 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1679                                 "next cpb count 0x%X next cpb idx 0x%x\n",
1680                                 notifier, notifier_error, gen_ctl, status,
1681                                 cpb_count, next_cpb_idx);
1682
1683                         for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1684                                 struct nv_adma_cpb *cpb = &pp->cpb[i];
1685                                 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1686                                     ap->link.sactive & (1 << i))
1687                                         ata_port_printk(ap, KERN_ERR,
1688                                                 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1689                                                 i, cpb->ctl_flags, cpb->resp_flags);
1690                         }
1691                 }
1692
1693                 /* Push us back into port register mode for error handling. */
1694                 nv_adma_register_mode(ap);
1695
1696                 /* Mark all of the CPBs as invalid to prevent them from
1697                    being executed */
1698                 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1699                         pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1700
1701                 /* clear CPB fetch count */
1702                 writew(0, mmio + NV_ADMA_CPB_COUNT);
1703
1704                 /* Reset channel */
1705                 tmp = readw(mmio + NV_ADMA_CTL);
1706                 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1707                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1708                 udelay(1);
1709                 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1710                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1711         }
1712
1713         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1714                            nv_hardreset, ata_std_postreset);
1715 }
1716
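/*
 * The SWNCQ defer queue is a small ring buffer: head and tail are
 * free-running counters masked with (ATA_MAX_QUEUE - 1) on access, and
 * defer_bits mirrors the queued tags as a bitmap so membership can be
 * tested without walking the ring.
 */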
1717 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1718 {
1719         struct nv_swncq_port_priv *pp = ap->private_data;
1720         struct defer_queue *dq = &pp->defer_queue;
1721
1722         /* warn if the queue is already full */
1723         WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1724         dq->defer_bits |= (1 << qc->tag);
1725         dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1726 }
1727
1728 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1729 {
1730         struct nv_swncq_port_priv *pp = ap->private_data;
1731         struct defer_queue *dq = &pp->defer_queue;
1732         unsigned int tag;
1733
1734         if (dq->head == dq->tail)       /* null queue */
1735                 return NULL;
1736
1737         tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1738         dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1739         WARN_ON(!(dq->defer_bits & (1 << tag)));
1740         dq->defer_bits &= ~(1 << tag);
1741
1742         return ata_qc_from_tag(ap, tag);
1743 }
1744
1745 static void nv_swncq_fis_reinit(struct ata_port *ap)
1746 {
1747         struct nv_swncq_port_priv *pp = ap->private_data;
1748
1749         pp->dhfis_bits = 0;
1750         pp->dmafis_bits = 0;
1751         pp->sdbfis_bits = 0;
1752         pp->ncq_flags = 0;
1753 }
1754
1755 static void nv_swncq_pp_reinit(struct ata_port *ap)
1756 {
1757         struct nv_swncq_port_priv *pp = ap->private_data;
1758         struct defer_queue *dq = &pp->defer_queue;
1759
1760         dq->head = 0;
1761         dq->tail = 0;
1762         dq->defer_bits = 0;
1763         pp->qc_active = 0;
1764         pp->last_issue_tag = ATA_TAG_POISON;
1765         nv_swncq_fis_reinit(ap);
1766 }
1767
1768 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1769 {
1770         struct nv_swncq_port_priv *pp = ap->private_data;
1771
1772         writew(fis, pp->irq_block);
1773 }
1774
1775 static void __ata_bmdma_stop(struct ata_port *ap)
1776 {
1777         struct ata_queued_cmd qc;
1778
1779         qc.ap = ap;
1780         ata_bmdma_stop(&qc);
1781 }
1782
1783 static void nv_swncq_ncq_stop(struct ata_port *ap)
1784 {
1785         struct nv_swncq_port_priv *pp = ap->private_data;
1786         unsigned int i;
1787         u32 sactive;
1788         u32 done_mask;
1789
1790         ata_port_printk(ap, KERN_ERR,
1791                         "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1792                         ap->qc_active, ap->link.sactive);
1793         ata_port_printk(ap, KERN_ERR,
1794                 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1795                 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1796                 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1797                 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1798
1799         ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1800                         ap->ops->check_status(ap),
1801                         ioread8(ap->ioaddr.error_addr));
1802
1803         sactive = readl(pp->sactive_block);
1804         done_mask = pp->qc_active ^ sactive;
1805
1806         ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1807         for (i = 0; i < ATA_MAX_QUEUE; i++) {
1808                 u8 err = 0;
1809                 if (pp->qc_active & (1 << i))
1810                         err = 0;
1811                 else if (done_mask & (1 << i))
1812                         err = 1;
1813                 else
1814                         continue;
1815
1816                 ata_port_printk(ap, KERN_ERR,
1817                                 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1818                                 (pp->dhfis_bits >> i) & 0x1,
1819                                 (pp->dmafis_bits >> i) & 0x1,
1820                                 (pp->sdbfis_bits >> i) & 0x1,
1821                                 (sactive >> i) & 0x1,
1822                                 (err ? "error! tag doesn't exit" : " "));
1823         }
1824
1825         nv_swncq_pp_reinit(ap);
1826         ap->ops->irq_clear(ap);
1827         __ata_bmdma_stop(ap);
1828         nv_swncq_irq_clear(ap, 0xffff);
1829 }
1830
1831 static void nv_swncq_error_handler(struct ata_port *ap)
1832 {
1833         struct ata_eh_context *ehc = &ap->link.eh_context;
1834
1835         if (ap->link.sactive) {
1836                 nv_swncq_ncq_stop(ap);
1837                 ehc->i.action |= ATA_EH_HARDRESET;
1838         }
1839
1840         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1841                            nv_hardreset, ata_std_postreset);
1842 }
1843
1844 #ifdef CONFIG_PM
1845 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1846 {
1847         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1848         u32 tmp;
1849
1850         /* clear irq */
1851         writel(~0, mmio + NV_INT_STATUS_MCP55);
1852
1853         /* disable irq */
1854         writel(0, mmio + NV_INT_ENABLE_MCP55);
1855
1856         /* disable swncq */
1857         tmp = readl(mmio + NV_CTL_MCP55);
1858         tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1859         writel(tmp, mmio + NV_CTL_MCP55);
1860
1861         return 0;
1862 }
1863
1864 static int nv_swncq_port_resume(struct ata_port *ap)
1865 {
1866         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1867         u32 tmp;
1868
1869         /* clear irq */
1870         writel(~0, mmio + NV_INT_STATUS_MCP55);
1871
1872         /* enable irq */
1873         writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1874
1875         /* enable swncq */
1876         tmp = readl(mmio + NV_CTL_MCP55);
1877         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1878
1879         return 0;
1880 }
1881 #endif
1882
1883 static void nv_swncq_host_init(struct ata_host *host)
1884 {
1885         u32 tmp;
1886         void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1887         struct pci_dev *pdev = to_pci_dev(host->dev);
1888         u8 regval;
1889
1890         /* disable ECO 398 */
1891         pci_read_config_byte(pdev, 0x7f, &regval);
1892         regval &= ~(1 << 7);
1893         pci_write_config_byte(pdev, 0x7f, regval);
1894
1895         /* enable swncq */
1896         tmp = readl(mmio + NV_CTL_MCP55);
1897         VPRINTK("HOST_CTL:0x%X\n", tmp);
1898         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1899
1900         /* enable irq intr */
1901         tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1902         VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1903         writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1904
1905         /* clear port irq */
1906         writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1907 }
1908
1909 static int nv_swncq_slave_config(struct scsi_device *sdev)
1910 {
1911         struct ata_port *ap = ata_shost_to_port(sdev->host);
1912         struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1913         struct ata_device *dev;
1914         int rc;
1915         u8 rev;
1916         u8 check_maxtor = 0;
1917         unsigned char model_num[ATA_ID_PROD_LEN + 1];
1918
1919         rc = ata_scsi_slave_config(sdev);
1920         if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1921                 /* Not a proper libata device, ignore */
1922                 return rc;
1923
1924         dev = &ap->link.device[sdev->id];
1925         if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1926                 return rc;
1927
1928         /* if MCP51 and Maxtor, then disable ncq */
1929         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1930                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1931                 check_maxtor = 1;
1932
1933         /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1934         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1935                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1936                 pci_read_config_byte(pdev, 0x8, &rev);
1937                 if (rev <= 0xa2)
1938                         check_maxtor = 1;
1939         }
1940
1941         if (!check_maxtor)
1942                 return rc;
1943
1944         ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1945
1946         if (strncmp(model_num, "Maxtor", 6) == 0) {
1947                 ata_scsi_change_queue_depth(sdev, 1);
1948                 ata_dev_printk(dev, KERN_NOTICE,
1949                         "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1950         }
1951
1952         return rc;
1953 }
1954
1955 static int nv_swncq_port_start(struct ata_port *ap)
1956 {
1957         struct device *dev = ap->host->dev;
1958         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1959         struct nv_swncq_port_priv *pp;
1960         int rc;
1961
1962         rc = ata_port_start(ap);
1963         if (rc)
1964                 return rc;
1965
1966         pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1967         if (!pp)
1968                 return -ENOMEM;
1969
1970         pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1971                                       &pp->prd_dma, GFP_KERNEL);
1972         if (!pp->prd)
1973                 return -ENOMEM;
1974         memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1975
1976         ap->private_data = pp;
1977         pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1978         pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1979         pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1980
1981         return 0;
1982 }
1983
1984 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1985 {
1986         if (qc->tf.protocol != ATA_PROT_NCQ) {
1987                 ata_qc_prep(qc);
1988                 return;
1989         }
1990
1991         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1992                 return;
1993
1994         nv_swncq_fill_sg(qc);
1995 }
1996
1997 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1998 {
1999         struct ata_port *ap = qc->ap;
2000         struct scatterlist *sg;
2001         struct nv_swncq_port_priv *pp = ap->private_data;
2002         struct ata_prd *prd;
2003         unsigned int si, idx;
2004
2005         prd = pp->prd + ATA_MAX_PRD * qc->tag;
2006
2007         idx = 0;
2008         for_each_sg(qc->sg, sg, qc->n_elem, si) {
2009                 u32 addr, offset;
2010                 u32 sg_len, len;
2011
2012                 addr = (u32)sg_dma_address(sg);
2013                 sg_len = sg_dma_len(sg);
2014
2015                 while (sg_len) {
2016                         offset = addr & 0xffff;
2017                         len = sg_len;
2018                         if ((offset + sg_len) > 0x10000)
2019                                 len = 0x10000 - offset;
2020
2021                         prd[idx].addr = cpu_to_le32(addr);
2022                         prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2023
2024                         idx++;
2025                         sg_len -= len;
2026                         addr += len;
2027                 }
2028         }
2029
2030         prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2031 }
2032
2033 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2034                                           struct ata_queued_cmd *qc)
2035 {
2036         struct nv_swncq_port_priv *pp = ap->private_data;
2037
2038         if (qc == NULL)
2039                 return 0;
2040
2041         DPRINTK("Enter\n");
2042
2043         writel((1 << qc->tag), pp->sactive_block);
2044         pp->last_issue_tag = qc->tag;
2045         pp->dhfis_bits &= ~(1 << qc->tag);
2046         pp->dmafis_bits &= ~(1 << qc->tag);
2047         pp->qc_active |= (0x1 << qc->tag);
2048
2049         ap->ops->tf_load(ap, &qc->tf);   /* load tf registers */
2050         ap->ops->exec_command(ap, &qc->tf);
2051
2052         DPRINTK("Issued tag %u\n", qc->tag);
2053
2054         return 0;
2055 }
2056
2057 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2058 {
2059         struct ata_port *ap = qc->ap;
2060         struct nv_swncq_port_priv *pp = ap->private_data;
2061
2062         if (qc->tf.protocol != ATA_PROT_NCQ)
2063                 return ata_qc_issue_prot(qc);
2064
2065         DPRINTK("Enter\n");
2066
2067         if (!pp->qc_active)
2068                 nv_swncq_issue_atacmd(ap, qc);
2069         else
2070                 nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2071
2072         return 0;
2073 }
2074
2075 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2076 {
2077         u32 serror;
2078         struct ata_eh_info *ehi = &ap->link.eh_info;
2079
2080         ata_ehi_clear_desc(ehi);
2081
2082         /* as on AHCI, SError must be cleared or the port might lock up */
2083         sata_scr_read(&ap->link, SCR_ERROR, &serror);
2084         sata_scr_write(&ap->link, SCR_ERROR, serror);
2085
2086         /* analyze @fis */
2087         if (fis & NV_SWNCQ_IRQ_ADDED)
2088                 ata_ehi_push_desc(ehi, "hot plug");
2089         else if (fis & NV_SWNCQ_IRQ_REMOVED)
2090                 ata_ehi_push_desc(ehi, "hot unplug");
2091
2092         ata_ehi_hotplugged(ehi);
2093
2094         /* okay, let's hand over to EH */
2095         ehi->serror |= serror;
2096
2097         ata_port_freeze(ap);
2098 }
2099
2100 static int nv_swncq_sdbfis(struct ata_port *ap)
2101 {
2102         struct ata_queued_cmd *qc;
2103         struct nv_swncq_port_priv *pp = ap->private_data;
2104         struct ata_eh_info *ehi = &ap->link.eh_info;
2105         u32 sactive;
2106         int nr_done = 0;
2107         u32 done_mask;
2108         int i;
2109         u8 host_stat;
2110         u8 lack_dhfis = 0;
2111
2112         host_stat = ap->ops->bmdma_status(ap);
2113         if (unlikely(host_stat & ATA_DMA_ERR)) {
2114                 /* error when transferring data to/from memory */
2115                 ata_ehi_clear_desc(ehi);
2116                 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2117                 ehi->err_mask |= AC_ERR_HOST_BUS;
2118                 ehi->action |= ATA_EH_SOFTRESET;
2119                 return -EINVAL;
2120         }
2121
2122         ap->ops->irq_clear(ap);
2123         __ata_bmdma_stop(ap);
2124
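        /*
         * Tags that are set in qc_active but already cleared in SActive
         * have completed, so done_mask = qc_active ^ sactive.  A bit set in
         * both done_mask and sactive would mean a tag went active that was
         * never issued -- the protocol violation rejected just below.
         */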
2125         sactive = readl(pp->sactive_block);
2126         done_mask = pp->qc_active ^ sactive;
2127
2128         if (unlikely(done_mask & sactive)) {
2129                 ata_ehi_clear_desc(ehi);
2130                 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2131                                   "(%08x->%08x)", pp->qc_active, sactive);
2132                 ehi->err_mask |= AC_ERR_HSM;
2133                 ehi->action |= ATA_EH_HARDRESET;
2134                 return -EINVAL;
2135         }
2136         for (i = 0; i < ATA_MAX_QUEUE; i++) {
2137                 if (!(done_mask & (1 << i)))
2138                         continue;
2139
2140                 qc = ata_qc_from_tag(ap, i);
2141                 if (qc) {
2142                         ata_qc_complete(qc);
2143                         pp->qc_active &= ~(1 << i);
2144                         pp->dhfis_bits &= ~(1 << i);
2145                         pp->dmafis_bits &= ~(1 << i);
2146                         pp->sdbfis_bits |= (1 << i);
2147                         nr_done++;
2148                 }
2149         }
2150
2151         if (!ap->qc_active) {
2152                 DPRINTK("over\n");
2153                 nv_swncq_pp_reinit(ap);
2154                 return nr_done;
2155         }
2156
2157         if (pp->qc_active & pp->dhfis_bits)
2158                 return nr_done;
2159
2160         if ((pp->ncq_flags & ncq_saw_backout) ||
2161             (pp->qc_active ^ pp->dhfis_bits))
2162                 /* if the controller failed to deliver a Device-to-Host
2163                  * Register FIS for the command, the driver must reissue it.
2164                  */
2165                 lack_dhfis = 1;
2166
2167         DPRINTK("id 0x%x QC: qc_active 0x%x,"
2168                 "SWNCQ:qc_active 0x%X defer_bits %X "
2169                 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2170                 ap->print_id, ap->qc_active, pp->qc_active,
2171                 pp->defer_queue.defer_bits, pp->dhfis_bits,
2172                 pp->dmafis_bits, pp->last_issue_tag);
2173
2174         nv_swncq_fis_reinit(ap);
2175
2176         if (lack_dhfis) {
2177                 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2178                 nv_swncq_issue_atacmd(ap, qc);
2179                 return nr_done;
2180         }
2181
2182         if (pp->defer_queue.defer_bits) {
2183                 /* send deferral queue command */
2184                 qc = nv_swncq_qc_from_dq(ap);
2185                 WARN_ON(qc == NULL);
2186                 nv_swncq_issue_atacmd(ap, qc);
2187         }
2188
2189         return nr_done;
2190 }
2191
2192 static inline u32 nv_swncq_tag(struct ata_port *ap)
2193 {
2194         struct nv_swncq_port_priv *pp = ap->private_data;
2195         u32 tag;
2196
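        /* the active tag appears to be reported in bits 6:2 of the
           per-port tag register, hence the shift and 5-bit mask */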
2197         tag = readb(pp->tag_block) >> 2;
2198         return (tag & 0x1f);
2199 }
2200
2201 static int nv_swncq_dmafis(struct ata_port *ap)
2202 {
2203         struct ata_queued_cmd *qc;
2204         unsigned int rw;
2205         u8 dmactl;
2206         u32 tag;
2207         struct nv_swncq_port_priv *pp = ap->private_data;
2208
2209         __ata_bmdma_stop(ap);
2210         tag = nv_swncq_tag(ap);
2211
2212         DPRINTK("dma setup tag 0x%x\n", tag);
2213         qc = ata_qc_from_tag(ap, tag);
2214
2215         if (unlikely(!qc))
2216                 return 0;
2217
2218         rw = qc->tf.flags & ATA_TFLAG_WRITE;
2219
2220         /* load PRD table addr. */
2221         iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2222                   ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2223
2224         /* specify data direction, triple-check start bit is clear */
2225         dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2226         dmactl &= ~ATA_DMA_WR;
2227         if (!rw)
2228                 dmactl |= ATA_DMA_WR;
2229
2230         iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2231
2232         return 1;
2233 }
2234
2235 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2236 {
2237         struct nv_swncq_port_priv *pp = ap->private_data;
2238         struct ata_queued_cmd *qc;
2239         struct ata_eh_info *ehi = &ap->link.eh_info;
2240         u32 serror;
2241         u8 ata_stat;
2242         int rc = 0;
2243
2244         ata_stat = ap->ops->check_status(ap);
2245         nv_swncq_irq_clear(ap, fis);
2246         if (!fis)
2247                 return;
2248
2249         if (ap->pflags & ATA_PFLAG_FROZEN)
2250                 return;
2251
2252         if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2253                 nv_swncq_hotplug(ap, fis);
2254                 return;
2255         }
2256
2257         if (!pp->qc_active)
2258                 return;
2259
2260         if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2261                 return;
2262         ap->ops->scr_write(ap, SCR_ERROR, serror);
2263
2264         if (ata_stat & ATA_ERR) {
2265                 ata_ehi_clear_desc(ehi);
2266                 ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2267                 ehi->err_mask |= AC_ERR_DEV;
2268                 ehi->serror |= serror;
2269                 ehi->action |= ATA_EH_SOFTRESET;
2270                 ata_port_freeze(ap);
2271                 return;
2272         }
2273
2274         if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2275                 /* On a backout IRQ the drive has backed out of the queued
2276                  * command; the driver must reissue it some time later.
2277                  */
2278                 pp->ncq_flags |= ncq_saw_backout;
2279         }
2280
2281         if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2282                 pp->ncq_flags |= ncq_saw_sdb;
2283                 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2284                         "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2285                         ap->print_id, pp->qc_active, pp->dhfis_bits,
2286                         pp->dmafis_bits, readl(pp->sactive_block));
2287                 rc = nv_swncq_sdbfis(ap);
2288                 if (rc < 0)
2289                         goto irq_error;
2290         }
2291
2292         if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2293                 /* The interrupt indicates the new command
2294                  * was transmitted correctly to the drive.
2295                  */
2296                 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2297                 pp->ncq_flags |= ncq_saw_d2h;
2298                 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2299                         ata_ehi_push_desc(ehi, "illegal fis transaction");
2300                         ehi->err_mask |= AC_ERR_HSM;
2301                         ehi->action |= ATA_EH_HARDRESET;
2302                         goto irq_error;
2303                 }
2304
2305                 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2306                     !(pp->ncq_flags & ncq_saw_dmas)) {
2307                         ata_stat = ap->ops->check_status(ap);
2308                         if (ata_stat & ATA_BUSY)
2309                                 goto irq_exit;
2310
2311                         if (pp->defer_queue.defer_bits) {
2312                                 DPRINTK("send next command\n");
2313                                 qc = nv_swncq_qc_from_dq(ap);
2314                                 nv_swncq_issue_atacmd(ap, qc);
2315                         }
2316                 }
2317         }
2318
2319         if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2320                 /* program the dma controller with appropriate PRD buffers
2321                  * and start the DMA transfer for the requested command.
2322                  */
2323                 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2324                 pp->ncq_flags |= ncq_saw_dmas;
2325                 rc = nv_swncq_dmafis(ap);
2326         }
2327
2328 irq_exit:
2329         return;
2330 irq_error:
2331         ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2332         ata_port_freeze(ap);
2333         return;
2334 }
2335
2336 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2337 {
2338         struct ata_host *host = dev_instance;
2339         unsigned int i;
2340         unsigned int handled = 0;
2341         unsigned long flags;
2342         u32 irq_stat;
2343
2344         spin_lock_irqsave(&host->lock, flags);
2345
2346         irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2347
2348         for (i = 0; i < host->n_ports; i++) {
2349                 struct ata_port *ap = host->ports[i];
2350
2351                 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2352                         if (ap->link.sactive) {
2353                                 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2354                                 handled = 1;
2355                         } else {
2356                                 if (irq_stat)   /* preserve the hotplug bits */
2357                                         nv_swncq_irq_clear(ap, 0xfff0);
2358
2359                                 handled += nv_host_intr(ap, (u8)irq_stat);
2360                         }
2361                 }
2362                 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2363         }
2364
2365         spin_unlock_irqrestore(&host->lock, flags);
2366
2367         return IRQ_RETVAL(handled);
2368 }
2369
2370 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2371 {
2372         static int printed_version;
2373         const struct ata_port_info *ppi[] = { NULL, NULL };
2374         struct ata_host *host;
2375         struct nv_host_priv *hpriv;
2376         int rc;
2377         u32 bar;
2378         void __iomem *base;
2379         unsigned long type = ent->driver_data;
2380
2381         // Make sure this is a SATA controller by counting the number of bars
2382         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2383         // it's an IDE controller and we ignore it.
2384         for (bar = 0; bar < 6; bar++)
2385                 if (pci_resource_start(pdev, bar) == 0)
2386                         return -ENODEV;
2387
2388         if (!printed_version++)
2389                 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2390
2391         rc = pcim_enable_device(pdev);
2392         if (rc)
2393                 return rc;
2394
2395         /* determine type and allocate host */
2396         if (type == CK804 && adma_enabled) {
2397                 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2398                 type = ADMA;
2399         }
2400
2401         if (type == SWNCQ) {
2402                 if (swncq_enabled)
2403                         dev_printk(KERN_NOTICE, &pdev->dev,
2404                                    "Using SWNCQ mode\n");
2405                 else
2406                         type = GENERIC;
2407         }
2408
2409         ppi[0] = &nv_port_info[type];
2410         rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
2411         if (rc)
2412                 return rc;
2413
2414         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2415         if (!hpriv)
2416                 return -ENOMEM;
2417         hpriv->type = type;
2418         host->private_data = hpriv;
2419
2420         /* set 64bit dma masks, may fail */
2421         if (type == ADMA) {
2422                 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
2423                         pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2424         }
2425
2426         /* request and iomap NV_MMIO_BAR */
2427         rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2428         if (rc)
2429                 return rc;
2430
2431         /* configure SCR access */
2432         base = host->iomap[NV_MMIO_BAR];
2433         host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2434         host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2435
2436         /* enable SATA space for CK804 */
2437         if (type >= CK804) {
2438                 u8 regval;
2439
2440                 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2441                 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2442                 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2443         }
2444
2445         /* init ADMA */
2446         if (type == ADMA) {
2447                 rc = nv_adma_host_init(host);
2448                 if (rc)
2449                         return rc;
2450         } else if (type == SWNCQ)
2451                 nv_swncq_host_init(host);
2452
2453         pci_set_master(pdev);
2454         return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
2455                                  IRQF_SHARED, ppi[0]->sht);
2456 }
2457
2458 #ifdef CONFIG_PM
2459 static int nv_pci_device_resume(struct pci_dev *pdev)
2460 {
2461         struct ata_host *host = dev_get_drvdata(&pdev->dev);
2462         struct nv_host_priv *hpriv = host->private_data;
2463         int rc;
2464
2465         rc = ata_pci_device_do_resume(pdev);
2466         if (rc)
2467                 return rc;
2468
2469         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2470                 if (hpriv->type >= CK804) {
2471                         u8 regval;
2472
2473                         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2474                         regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2475                         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2476                 }
2477                 if (hpriv->type == ADMA) {
2478                         u32 tmp32;
2479                         struct nv_adma_port_priv *pp;
2480                         /* enable/disable ADMA on the ports appropriately */
2481                         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2482
2483                         pp = host->ports[0]->private_data;
2484                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2485                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2486                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2487                         else
2488                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2489                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2490                         pp = host->ports[1]->private_data;
2491                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2492                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2493                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2494                         else
2495                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2496                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2497
2498                         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2499                 }
2500         }
2501
2502         ata_host_resume(host);
2503
2504         return 0;
2505 }
2506 #endif
2507
2508 static void nv_ck804_host_stop(struct ata_host *host)
2509 {
2510         struct pci_dev *pdev = to_pci_dev(host->dev);
2511         u8 regval;
2512
2513         /* disable SATA space for CK804 */
2514         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2515         regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2516         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2517 }
2518
2519 static void nv_adma_host_stop(struct ata_host *host)
2520 {
2521         struct pci_dev *pdev = to_pci_dev(host->dev);
2522         u32 tmp32;
2523
2524         /* disable ADMA on the ports */
2525         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2526         tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2527                    NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2528                    NV_MCP_SATA_CFG_20_PORT1_EN |
2529                    NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2530
2531         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2532
2533         nv_ck804_host_stop(host);
2534 }
2535
2536 static int __init nv_init(void)
2537 {
2538         return pci_register_driver(&nv_pci_driver);
2539 }
2540
2541 static void __exit nv_exit(void)
2542 {
2543         pci_unregister_driver(&nv_pci_driver);
2544 }
2545
2546 module_init(nv_init);
2547 module_exit(nv_exit);
2548 module_param_named(adma, adma_enabled, bool, 0444);
2549 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2550 module_param_named(swncq, swncq_enabled, bool, 0444);
2551 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
2552