/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL
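
/* The ADMA engine uses a 4 GB DMA segment boundary here, versus the 64 KB
 * ATA_DMA_BOUNDARY of the legacy BMDMA path; nv_adma_slave_config() below
 * switches between the two when an ATAPI device forces a port back into
 * legacy mode. */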

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, /* 0 = INT, 1 = SMI */

        /* for PCI config register 20 */
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
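
        /* Each command tag thus owns a 1 KB slot of DMA memory: a 128-byte
         * CPB followed by 56 external APRDs of 16 bytes each.  The "+ 5" in
         * NV_ADMA_SGTBL_TOTAL_LEN adds the five APRDs embedded in the CPB
         * itself, giving the total S/G length advertised via sg_tablesize. */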

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ enable bits */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SWNCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};
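
/* Per-port DMA memory layout (set up in nv_adma_port_start()): an array of
 * NV_ADMA_MAX_CPBS CPBs, one per command tag, followed by the per-tag
 * external APRD tables.  nv_adma_fill_sg() places the first five S/G
 * entries in cpb->aprd[] and chains any remainder through next_aprd. */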

struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_prd  *prd;    /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};

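/* Test the per-port ADMA interrupt bit in the general control register:
 * bit 19 for port 0 and bit 31 (19 + 12) for port 1. */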
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), SWNCQ },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = NV_ADMA_MAX_CPBS,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_swncq_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = ATA_MAX_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
        .host_stop              = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = nv_adma_tf_read,
        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = nv_adma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,
        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .irq_handler    = nv_generic_interrupt,
        },
        /* nforce2/3 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .irq_handler    = nv_nf2_interrupt,
        },
        /* ck804 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .irq_handler    = nv_ck804_interrupt,
        },
        /* ADMA */
        {
                .sht            = &nv_adma_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .irq_handler    = nv_adma_interrupt,
        },
        /* SWNCQ */
        {
                .sht            = &nv_swncq_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .link_flags     = ATA_LFLAG_HRST_TO_RESUME,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .irq_handler    = nv_swncq_interrupt,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static int swncq_enabled;

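/* Put the port back into legacy register mode: wait for the engine to go
 * idle, clear the GO bit, then wait for the LEGACY status flag.  Each poll
 * loop allows up to 20 iterations of ndelay(50) before warning and
 * proceeding anyway. */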
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                         "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                         status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u64 bounce_limit;
        unsigned long segment_boundary;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                bounce_limit = ATA_DMA_MASK;
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                bounce_limit = *ap->dev->dma_mask;
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Since commands where a result TF is requested are not
           executed in ADMA mode, the only time this function will be called
           in ADMA mode will be if a command fails. In this case we
           don't care about going into register mode with ADMA commands
           pending, as the commands will all shortly be aborted anyway. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}

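/* Encode a taskfile into the CPB's tf[] list.  Each __le16 entry carries
 * the shadow-register address in the high byte and the value in the low
 * byte, optionally OR'd with control bits from nv_adma_regbits: WNB on the
 * first entry to wait for not-BSY, CMDEND on the command entry, IGN to pad
 * unused slots.  Always returns 12, the full list length. */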
804 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
805 {
806         unsigned int idx = 0;
807
808         if (tf->flags & ATA_TFLAG_ISADDR) {
809                 if (tf->flags & ATA_TFLAG_LBA48) {
810                         cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
811                         cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
812                         cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
813                         cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
814                         cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
815                         cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature);
816                 } else
817                         cpb[idx++] = cpu_to_le16((ATA_REG_ERR    << 8) | tf->feature | WNB);
818
819                 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT  << 8) | tf->nsect);
820                 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL   << 8) | tf->lbal);
821                 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM   << 8) | tf->lbam);
822                 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH   << 8) | tf->lbah);
823         }
824
825         if (tf->flags & ATA_TFLAG_DEVICE)
826                 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
827
828         cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);
829
830         while (idx < 12)
831                 cpb[idx++] = cpu_to_le16(IGN);
832
833         return idx;
834 }
835
836 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
837 {
838         struct nv_adma_port_priv *pp = ap->private_data;
839         u8 flags = pp->cpb[cpb_num].resp_flags;
840
841         VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
842
843         if (unlikely((force_err ||
844                      flags & (NV_CPB_RESP_ATA_ERR |
845                               NV_CPB_RESP_CMD_ERR |
846                               NV_CPB_RESP_CPB_ERR)))) {
847                 struct ata_eh_info *ehi = &ap->link.eh_info;
848                 int freeze = 0;
849
850                 ata_ehi_clear_desc(ehi);
851                 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
852                 if (flags & NV_CPB_RESP_ATA_ERR) {
853                         ata_ehi_push_desc(ehi, "ATA error");
854                         ehi->err_mask |= AC_ERR_DEV;
855                 } else if (flags & NV_CPB_RESP_CMD_ERR) {
856                         ata_ehi_push_desc(ehi, "CMD error");
857                         ehi->err_mask |= AC_ERR_DEV;
858                 } else if (flags & NV_CPB_RESP_CPB_ERR) {
859                         ata_ehi_push_desc(ehi, "CPB error");
860                         ehi->err_mask |= AC_ERR_SYSTEM;
861                         freeze = 1;
862                 } else {
863                         /* notifier error, but no error in CPB flags? */
864                         ata_ehi_push_desc(ehi, "unknown");
865                         ehi->err_mask |= AC_ERR_OTHER;
866                         freeze = 1;
867                 }
868                 /* Kill all commands. EH will determine what actually failed. */
869                 if (freeze)
870                         ata_port_freeze(ap);
871                 else
872                         ata_port_abort(ap);
873                 return 1;
874         }
875
876         if (likely(flags & NV_CPB_RESP_DONE)) {
877                 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
878                 VPRINTK("CPB flags done, flags=0x%x\n", flags);
879                 if (likely(qc)) {
880                         DPRINTK("Completing qc from tag %d\n", cpb_num);
881                         ata_qc_complete(qc);
882                 } else {
883                         struct ata_eh_info *ehi = &ap->link.eh_info;
884                         /* Notifier bits set without a command may indicate the drive
885                            is misbehaving. Raise host state machine violation on this
886                            condition. */
887                         ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
888                                 cpb_num);
889                         ehi->err_mask |= AC_ERR_HSM;
890                         ehi->action |= ATA_EH_SOFTRESET;
891                         ata_port_freeze(ap);
892                         return 1;
893                 }
894         }
895         return 0;
896 }
897
898 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
899 {
900         struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
901
902         /* freeze if hotplugged */
903         if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
904                 ata_port_freeze(ap);
905                 return 1;
906         }
907
908         /* bail out if not our interrupt */
909         if (!(irq_stat & NV_INT_DEV))
910                 return 0;
911
912         /* DEV interrupt w/ no active qc? */
913         if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
914                 ata_check_status(ap);
915                 return 1;
916         }
917
918         /* handle interrupt */
919         return ata_host_intr(ap, qc);
920 }
921
922 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
923 {
924         struct ata_host *host = dev_instance;
925         int i, handled = 0;
926         u32 notifier_clears[2];
927
928         spin_lock(&host->lock);
929
930         for (i = 0; i < host->n_ports; i++) {
931                 struct ata_port *ap = host->ports[i];
932                 notifier_clears[i] = 0;
933
934                 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
935                         struct nv_adma_port_priv *pp = ap->private_data;
936                         void __iomem *mmio = pp->ctl_block;
937                         u16 status;
938                         u32 gen_ctl;
939                         u32 notifier, notifier_error;
940
941                         /* if ADMA is disabled, use standard ata interrupt handler */
942                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
943                                 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
944                                         >> (NV_INT_PORT_SHIFT * i);
945                                 handled += nv_host_intr(ap, irq_stat);
946                                 continue;
947                         }
948
949                         /* if in ATA register mode, check for standard interrupts */
950                         if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
951                                 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
952                                         >> (NV_INT_PORT_SHIFT * i);
953                                 if (ata_tag_valid(ap->link.active_tag))
954                                         /** NV_INT_DEV indication seems unreliable at times
955                                             at least in ADMA mode. Force it on always when a
956                                             command is active, to prevent losing interrupts. */
957                                         irq_stat |= NV_INT_DEV;
958                                 handled += nv_host_intr(ap, irq_stat);
959                         }
960
961                         notifier = readl(mmio + NV_ADMA_NOTIFIER);
962                         notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
963                         notifier_clears[i] = notifier | notifier_error;
964
965                         gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
966
967                         if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
968                             !notifier_error)
969                                 /* Nothing to do */
970                                 continue;
971
972                         status = readw(mmio + NV_ADMA_STAT);
973
974                         /* Clear status. Ensure the controller sees the clearing before we start
975                            looking at any of the CPB statuses, so that any CPB completions after
976                            this point in the handler will raise another interrupt. */
977                         writew(status, mmio + NV_ADMA_STAT);
978                         readw(mmio + NV_ADMA_STAT); /* flush posted write */
979                         rmb();
980
981                         handled++; /* irq handled if we got here */
982
983                         /* freeze if hotplugged or controller error */
984                         if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
985                                                NV_ADMA_STAT_HOTUNPLUG |
986                                                NV_ADMA_STAT_TIMEOUT |
987                                                NV_ADMA_STAT_SERROR))) {
988                                 struct ata_eh_info *ehi = &ap->link.eh_info;
989
990                                 ata_ehi_clear_desc(ehi);
991                                 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
992                                 if (status & NV_ADMA_STAT_TIMEOUT) {
993                                         ehi->err_mask |= AC_ERR_SYSTEM;
994                                         ata_ehi_push_desc(ehi, "timeout");
995                                 } else if (status & NV_ADMA_STAT_HOTPLUG) {
996                                         ata_ehi_hotplugged(ehi);
997                                         ata_ehi_push_desc(ehi, "hotplug");
998                                 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
999                                         ata_ehi_hotplugged(ehi);
1000                                         ata_ehi_push_desc(ehi, "hot unplug");
1001                                 } else if (status & NV_ADMA_STAT_SERROR) {
1002                                         /* let libata analyze SError and figure out the cause */
1003                                         ata_ehi_push_desc(ehi, "SError");
1004                                 } else
1005                                         ata_ehi_push_desc(ehi, "unknown");
1006                                 ata_port_freeze(ap);
1007                                 continue;
1008                         }
1009
1010                         if (status & (NV_ADMA_STAT_DONE |
1011                                       NV_ADMA_STAT_CPBERR)) {
1012                                 u32 check_commands;
1013                                 int pos, error = 0;
1014
1015                                 if(ata_tag_valid(ap->link.active_tag))
1016                                         check_commands = 1 << ap->link.active_tag;
1017                                 else
1018                                         check_commands = ap->link.sactive;
1019
1020                                 /** Check CPBs for completed commands */
1021                                 while ((pos = ffs(check_commands)) && !error) {
1022                                         pos--;
1023                                         error = nv_adma_check_cpb(ap, pos,
1024                                                 notifier_error & (1 << pos) );
1025                                         check_commands &= ~(1 << pos );
1026                                 }
1027                         }
1028                 }
1029         }
1030
1031         if(notifier_clears[0] || notifier_clears[1]) {
1032                 /* Note: Both notifier clear registers must be written
1033                    if either is set, even if one is zero, according to NVIDIA. */
1034                 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1035                 writel(notifier_clears[0], pp->notifier_clear_block);
1036                 pp = host->ports[1]->private_data;
1037                 writel(notifier_clears[1], pp->notifier_clear_block);
1038         }
1039
1040         spin_unlock(&host->lock);
1041
1042         return IRQ_RETVAL(handled);
1043 }
1044
1045 static void nv_adma_freeze(struct ata_port *ap)
1046 {
1047         struct nv_adma_port_priv *pp = ap->private_data;
1048         void __iomem *mmio = pp->ctl_block;
1049         u16 tmp;
1050
1051         nv_ck804_freeze(ap);
1052
1053         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1054                 return;
1055
1056         /* clear any outstanding CK804 notifications */
1057         writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1058                 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1059
1060         /* Disable interrupt */
1061         tmp = readw(mmio + NV_ADMA_CTL);
1062         writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1063                 mmio + NV_ADMA_CTL);
1064         readw(mmio + NV_ADMA_CTL );     /* flush posted write */
1065 }
1066
1067 static void nv_adma_thaw(struct ata_port *ap)
1068 {
1069         struct nv_adma_port_priv *pp = ap->private_data;
1070         void __iomem *mmio = pp->ctl_block;
1071         u16 tmp;
1072
1073         nv_ck804_thaw(ap);
1074
1075         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1076                 return;
1077
1078         /* Enable interrupt */
1079         tmp = readw(mmio + NV_ADMA_CTL);
1080         writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1081                 mmio + NV_ADMA_CTL);
1082         readw(mmio + NV_ADMA_CTL );     /* flush posted write */
1083 }
1084
1085 static void nv_adma_irq_clear(struct ata_port *ap)
1086 {
1087         struct nv_adma_port_priv *pp = ap->private_data;
1088         void __iomem *mmio = pp->ctl_block;
1089         u32 notifier_clears[2];
1090
1091         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1092                 ata_bmdma_irq_clear(ap);
1093                 return;
1094         }
1095
1096         /* clear any outstanding CK804 notifications */
1097         writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1098                 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1099
1100         /* clear ADMA status */
1101         writew(0xffff, mmio + NV_ADMA_STAT);
1102
1103         /* clear notifiers - note both ports need to be written with
1104            something even though we are only clearing on one */
1105         if (ap->port_no == 0) {
1106                 notifier_clears[0] = 0xFFFFFFFF;
1107                 notifier_clears[1] = 0;
1108         } else {
1109                 notifier_clears[0] = 0;
1110                 notifier_clears[1] = 0xFFFFFFFF;
1111         }
1112         pp = ap->host->ports[0]->private_data;
1113         writel(notifier_clears[0], pp->notifier_clear_block);
1114         pp = ap->host->ports[1]->private_data;
1115         writel(notifier_clears[1], pp->notifier_clear_block);
1116 }
1117
1118 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1119 {
1120         struct nv_adma_port_priv *pp = qc->ap->private_data;
1121
1122         if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1123                 ata_bmdma_post_internal_cmd(qc);
1124 }
1125
1126 static int nv_adma_port_start(struct ata_port *ap)
1127 {
1128         struct device *dev = ap->host->dev;
1129         struct nv_adma_port_priv *pp;
1130         int rc;
1131         void *mem;
1132         dma_addr_t mem_dma;
1133         void __iomem *mmio;
1134         u16 tmp;
1135
1136         VPRINTK("ENTER\n");
1137
1138         rc = ata_port_start(ap);
1139         if (rc)
1140                 return rc;
1141
1142         pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1143         if (!pp)
1144                 return -ENOMEM;
1145
1146         mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1147                ap->port_no * NV_ADMA_PORT_SIZE;
1148         pp->ctl_block = mmio;
1149         pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1150         pp->notifier_clear_block = pp->gen_block +
1151                NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1152
1153         mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1154                                   &mem_dma, GFP_KERNEL);
1155         if (!mem)
1156                 return -ENOMEM;
1157         memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1158
1159         /*
1160          * First item in chunk of DMA memory:
1161          * 128-byte command parameter block (CPB)
1162          * one for each command tag
1163          */
1164         pp->cpb     = mem;
1165         pp->cpb_dma = mem_dma;
1166
1167         writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
1168         writel((mem_dma >> 16 ) >> 16,  mmio + NV_ADMA_CPB_BASE_HIGH);
1169
1170         mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1171         mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1172
1173         /*
1174          * Second item: block of ADMA_SGTBL_LEN s/g entries
1175          */
1176         pp->aprd = mem;
1177         pp->aprd_dma = mem_dma;
1178
1179         ap->private_data = pp;
1180
1181         /* clear any outstanding interrupt conditions */
1182         writew(0xffff, mmio + NV_ADMA_STAT);
1183
1184         /* initialize port variables */
1185         pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1186
1187         /* clear CPB fetch count */
1188         writew(0, mmio + NV_ADMA_CPB_COUNT);
1189
1190         /* clear GO for register mode, enable interrupt */
1191         tmp = readw(mmio + NV_ADMA_CTL);
1192         writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1193                  NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1194
1195         tmp = readw(mmio + NV_ADMA_CTL);
1196         writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1197         readw( mmio + NV_ADMA_CTL );    /* flush posted write */
1198         udelay(1);
1199         writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1200         readw( mmio + NV_ADMA_CTL );    /* flush posted write */
1201
1202         return 0;
1203 }
1204
1205 static void nv_adma_port_stop(struct ata_port *ap)
1206 {
1207         struct nv_adma_port_priv *pp = ap->private_data;
1208         void __iomem *mmio = pp->ctl_block;
1209
1210         VPRINTK("ENTER\n");
1211         writew(0, mmio + NV_ADMA_CTL);
1212 }
1213
1214 #ifdef CONFIG_PM
1215 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1216 {
1217         struct nv_adma_port_priv *pp = ap->private_data;
1218         void __iomem *mmio = pp->ctl_block;
1219
1220         /* Go to register mode - clears GO */
1221         nv_adma_register_mode(ap);
1222
1223         /* clear CPB fetch count */
1224         writew(0, mmio + NV_ADMA_CPB_COUNT);
1225
1226         /* disable interrupt, shut down port */
1227         writew(0, mmio + NV_ADMA_CTL);
1228
1229         return 0;
1230 }
1231
1232 static int nv_adma_port_resume(struct ata_port *ap)
1233 {
1234         struct nv_adma_port_priv *pp = ap->private_data;
1235         void __iomem *mmio = pp->ctl_block;
1236         u16 tmp;
1237
1238         /* set CPB block location */
1239         writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
1240         writel((pp->cpb_dma >> 16 ) >> 16,      mmio + NV_ADMA_CPB_BASE_HIGH);
1241
1242         /* clear any outstanding interrupt conditions */
1243         writew(0xffff, mmio + NV_ADMA_STAT);
1244
1245         /* initialize port variables */
1246         pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1247
1248         /* clear CPB fetch count */
1249         writew(0, mmio + NV_ADMA_CPB_COUNT);
1250
1251         /* clear GO for register mode, enable interrupt */
1252         tmp = readw(mmio + NV_ADMA_CTL);
1253         writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1254                  NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1255
1256         tmp = readw(mmio + NV_ADMA_CTL);
1257         writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1258         readw( mmio + NV_ADMA_CTL );    /* flush posted write */
1259         udelay(1);
1260         writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1261         readw( mmio + NV_ADMA_CTL );    /* flush posted write */
1262
1263         return 0;
1264 }
1265 #endif
1266
1267 static void nv_adma_setup_port(struct ata_port *ap)
1268 {
1269         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1270         struct ata_ioports *ioport = &ap->ioaddr;
1271
1272         VPRINTK("ENTER\n");
1273
1274         mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1275
1276         ioport->cmd_addr        = mmio;
1277         ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
1278         ioport->error_addr      =
1279         ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
1280         ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
1281         ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
1282         ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
1283         ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
1284         ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
1285         ioport->status_addr     =
1286         ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
1287         ioport->altstatus_addr  =
1288         ioport->ctl_addr        = mmio + 0x20;
1289 }
1290
1291 static int nv_adma_host_init(struct ata_host *host)
1292 {
1293         struct pci_dev *pdev = to_pci_dev(host->dev);
1294         unsigned int i;
1295         u32 tmp32;
1296
1297         VPRINTK("ENTER\n");
1298
1299         /* enable ADMA on the ports */
1300         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1301         tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1302                  NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1303                  NV_MCP_SATA_CFG_20_PORT1_EN |
1304                  NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1305
1306         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1307
1308         for (i = 0; i < host->n_ports; i++)
1309                 nv_adma_setup_port(host->ports[i]);
1310
1311         return 0;
1312 }
1313
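/*
 * Each ADMA PRD (APRD) entry describes one scatter/gather segment.  The
 * last entry is flagged NV_APRD_END; every other entry gets
 * NV_APRD_CONT, except the fifth inline entry (idx == 4), where the
 * chain instead continues through the CPB's next_aprd pointer into the
 * external APRD table (see nv_adma_fill_sg below).
 */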
1314 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1315                               struct scatterlist *sg,
1316                               int idx,
1317                               struct nv_adma_prd *aprd)
1318 {
1319         u8 flags = 0;
1320         if (qc->tf.flags & ATA_TFLAG_WRITE)
1321                 flags |= NV_APRD_WRITE;
1322         if (idx == qc->n_elem - 1)
1323                 flags |= NV_APRD_END;
1324         else if (idx != 4)
1325                 flags |= NV_APRD_CONT;
1326
1327         aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1328         aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1329         aprd->flags = flags;
1330         aprd->packet_len = 0;
1331 }
1332
1333 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1334 {
1335         struct nv_adma_port_priv *pp = qc->ap->private_data;
1336         unsigned int idx;
1337         struct nv_adma_prd *aprd;
1338         struct scatterlist *sg;
1339
1340         VPRINTK("ENTER\n");
1341
1342         idx = 0;
1343
1344         ata_for_each_sg(sg, qc) {
1345                 aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
1346                 nv_adma_fill_aprd(qc, sg, idx, aprd);
1347                 idx++;
1348         }
1349         if (idx > 5)
1350                 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1351         else
1352                 cpb->next_aprd = cpu_to_le64(0);
1353 }
1354
1355 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1356 {
1357         struct nv_adma_port_priv *pp = qc->ap->private_data;
1358
1359         /* ADMA engine can only be used for non-ATAPI DMA commands,
1360            or interrupt-driven no-data commands, where a result taskfile
1361            is not required. */
1362         if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1363            (qc->tf.flags & ATA_TFLAG_POLLING) ||
1364            (qc->flags & ATA_QCFLAG_RESULT_TF))
1365                 return 1;
1366
1367         if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1368            (qc->tf.protocol == ATA_PROT_NODATA))
1369                 return 0;
1370
1371         return 1;
1372 }
1373
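/*
 * Building a CPB: resp_flags is parked at DONE and ctl_flags cleared
 * first, so the controller cannot fetch a half-built command; only
 * after the taskfile and APRDs are filled in is NV_CPB_CTL_CPB_VALID
 * set and resp_flags rearmed to zero, with write barriers enforcing
 * that ordering.
 */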
1374 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1375 {
1376         struct nv_adma_port_priv *pp = qc->ap->private_data;
1377         struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1378         u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1379                        NV_CPB_CTL_IEN;
1380
1381         if (nv_adma_use_reg_mode(qc)) {
1382                 nv_adma_register_mode(qc->ap);
1383                 ata_qc_prep(qc);
1384                 return;
1385         }
1386
1387         cpb->resp_flags = NV_CPB_RESP_DONE;
1388         wmb();
1389         cpb->ctl_flags = 0;
1390         wmb();
1391
1392         cpb->len                = 3;
1393         cpb->tag                = qc->tag;
1394         cpb->next_cpb_idx       = 0;
1395
1396         /* turn on NCQ flags for NCQ commands */
1397         if (qc->tf.protocol == ATA_PROT_NCQ)
1398                 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1399
1400         VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1401
1402         nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1403
1404         if (qc->flags & ATA_QCFLAG_DMAMAP) {
1405                 nv_adma_fill_sg(qc, cpb);
1406                 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1407         } else
1408                 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1409
1410         /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
1411            finished filling in all of the contents */
1412         wmb();
1413         cpb->ctl_flags = ctl_flags;
1414         wmb();
1415         cpb->resp_flags = 0;
1416 }
1417
1418 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1419 {
1420         struct nv_adma_port_priv *pp = qc->ap->private_data;
1421         void __iomem *mmio = pp->ctl_block;
1422         int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1423
1424         VPRINTK("ENTER\n");
1425
1426         if (nv_adma_use_reg_mode(qc)) {
1427                 /* use ATA register mode */
1428                 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1429                 nv_adma_register_mode(qc->ap);
1430                 return ata_qc_issue_prot(qc);
1431         } else
1432                 nv_adma_mode(qc->ap);
1433
1434         /* write append register, command tag in lower 8 bits
1435            and (number of cpbs to append -1) in top 8 bits */
1436         wmb();
1437
1438         if (curr_ncq != pp->last_issue_ncq) {
1439                 /* Seems to need some delay before switching between NCQ and non-NCQ
1440                    commands, else we get command timeouts and such. */
1441                 udelay(20);
1442                 pp->last_issue_ncq = curr_ncq;
1443         }
1444
1445         writew(qc->tag, mmio + NV_ADMA_APPEND);
1446
1447         DPRINTK("Issued tag %u\n", qc->tag);
1448
1449         return 0;
1450 }
1451
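/*
 * The generic interrupt handler has no controller-level status register
 * to consult: it simply walks every enabled port and lets
 * ata_host_intr() service whichever one has a non-polled command
 * outstanding.
 */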
1452 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1453 {
1454         struct ata_host *host = dev_instance;
1455         unsigned int i;
1456         unsigned int handled = 0;
1457         unsigned long flags;
1458
1459         spin_lock_irqsave(&host->lock, flags);
1460
1461         for (i = 0; i < host->n_ports; i++) {
1462                 struct ata_port *ap;
1463
1464                 ap = host->ports[i];
1465                 if (ap &&
1466                     !(ap->flags & ATA_FLAG_DISABLED)) {
1467                         struct ata_queued_cmd *qc;
1468
1469                         qc = ata_qc_from_tag(ap, ap->link.active_tag);
1470                         if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1471                                 handled += ata_host_intr(ap, qc);
1472                         else
1473                                 // No command pending?  Read the status
1474                                 // register anyway to clear any stale interrupt.
1475                                 ap->ops->check_status(ap);
1476                 }
1477
1478         }
1479
1480         spin_unlock_irqrestore(&host->lock, flags);
1481
1482         return IRQ_RETVAL(handled);
1483 }
1484
1485 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1486 {
1487         int i, handled = 0;
1488
1489         for (i = 0; i < host->n_ports; i++) {
1490                 struct ata_port *ap = host->ports[i];
1491
1492                 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1493                         handled += nv_host_intr(ap, irq_stat);
1494
1495                 irq_stat >>= NV_INT_PORT_SHIFT;
1496         }
1497
1498         return IRQ_RETVAL(handled);
1499 }
1500
1501 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1502 {
1503         struct ata_host *host = dev_instance;
1504         u8 irq_stat;
1505         irqreturn_t ret;
1506
1507         spin_lock(&host->lock);
1508         irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1509         ret = nv_do_interrupt(host, irq_stat);
1510         spin_unlock(&host->lock);
1511
1512         return ret;
1513 }
1514
1515 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1516 {
1517         struct ata_host *host = dev_instance;
1518         u8 irq_stat;
1519         irqreturn_t ret;
1520
1521         spin_lock(&host->lock);
1522         irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1523         ret = nv_do_interrupt(host, irq_stat);
1524         spin_unlock(&host->lock);
1525
1526         return ret;
1527 }
1528
1529 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1530 {
1531         if (sc_reg > SCR_CONTROL)
1532                 return -EINVAL;
1533
1534         *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1535         return 0;
1536 }
1537
1538 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1539 {
1540         if (sc_reg > SCR_CONTROL)
1541                 return -EINVAL;
1542
1543         iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1544         return 0;
1545 }
1546
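/*
 * freeze/thaw on nForce2/3 works on the shared NV_INT_ENABLE register:
 * each port owns a 4-bit field (NV_INT_PORT_SHIFT).  Freezing masks the
 * whole field; thawing first acks any stale status bits and then
 * re-enables only the NV_INT_MASK (device + hotplug) bits.
 */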
1547 static void nv_nf2_freeze(struct ata_port *ap)
1548 {
1549         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1550         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1551         u8 mask;
1552
1553         mask = ioread8(scr_addr + NV_INT_ENABLE);
1554         mask &= ~(NV_INT_ALL << shift);
1555         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1556 }
1557
1558 static void nv_nf2_thaw(struct ata_port *ap)
1559 {
1560         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1561         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1562         u8 mask;
1563
1564         iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1565
1566         mask = ioread8(scr_addr + NV_INT_ENABLE);
1567         mask |= (NV_INT_MASK << shift);
1568         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1569 }
1570
1571 static void nv_ck804_freeze(struct ata_port *ap)
1572 {
1573         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1574         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1575         u8 mask;
1576
1577         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1578         mask &= ~(NV_INT_ALL << shift);
1579         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1580 }
1581
1582 static void nv_ck804_thaw(struct ata_port *ap)
1583 {
1584         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1585         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1586         u8 mask;
1587
1588         writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1589
1590         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1591         mask |= (NV_INT_MASK << shift);
1592         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1593 }
1594
1595 static void nv_mcp55_freeze(struct ata_port *ap)
1596 {
1597         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1598         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1599         u32 mask;
1600
1601         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1602
1603         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1604         mask &= ~(NV_INT_ALL_MCP55 << shift);
1605         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1606         ata_bmdma_freeze(ap);
1607 }
1608
1609 static void nv_mcp55_thaw(struct ata_port *ap)
1610 {
1611         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1612         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1613         u32 mask;
1614
1615         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1616
1617         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1618         mask |= (NV_INT_MASK_MCP55 << shift);
1619         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1620         ata_bmdma_thaw(ap);
1621 }
1622
1623 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1624                         unsigned long deadline)
1625 {
1626         unsigned int dummy;
1627
1628         /* SATA hardreset fails to retrieve proper device signature on
1629          * some controllers.  Don't classify on hardreset.  For more
1630          * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
1631          */
1632         return sata_std_hardreset(link, &dummy, deadline);
1633 }
1634
1635 static void nv_error_handler(struct ata_port *ap)
1636 {
1637         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1638                            nv_hardreset, ata_std_postreset);
1639 }
1640
1641 static void nv_adma_error_handler(struct ata_port *ap)
1642 {
1643         struct nv_adma_port_priv *pp = ap->private_data;
1644         if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1645                 void __iomem *mmio = pp->ctl_block;
1646                 int i;
1647                 u16 tmp;
1648
1649                 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1650                         u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1651                         u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1652                         u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1653                         u32 status = readw(mmio + NV_ADMA_STAT);
1654                         u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1655                         u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1656
1657                         ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
1658                                 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1659                                 "next cpb count 0x%X next cpb idx 0x%x\n",
1660                                 notifier, notifier_error, gen_ctl, status,
1661                                 cpb_count, next_cpb_idx);
1662
1663                         for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1664                                 struct nv_adma_cpb *cpb = &pp->cpb[i];
1665                                 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1666                                     (ap->link.sactive & (1 << i)))
1667                                         ata_port_printk(ap, KERN_ERR,
1668                                                 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1669                                                 i, cpb->ctl_flags, cpb->resp_flags);
1670                         }
1671                 }
1672
1673                 /* Push us back into port register mode for error handling. */
1674                 nv_adma_register_mode(ap);
1675
1676                 /* Mark all of the CPBs as invalid to prevent them from being executed */
1677                 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1678                         pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1679
1680                 /* clear CPB fetch count */
1681                 writew(0, mmio + NV_ADMA_CPB_COUNT);
1682
1683                 /* Reset channel */
1684                 tmp = readw(mmio + NV_ADMA_CTL);
1685                 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1686                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1687                 udelay(1);
1688                 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1689                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1690         }
1691
1692         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1693                            nv_hardreset, ata_std_postreset);
1694 }
1695
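/*
 * The SWNCQ defer queue is a small ring buffer of tags: head and tail
 * are free-running counters masked with (ATA_MAX_QUEUE - 1), which
 * relies on ATA_MAX_QUEUE being a power of two, and defer_bits mirrors
 * the queued tags for quick testing.
 */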
1696 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1697 {
1698         struct nv_swncq_port_priv *pp = ap->private_data;
1699         struct defer_queue *dq = &pp->defer_queue;
1700
1701         /* the defer queue must never overflow */
1702         WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1703         dq->defer_bits |= (1 << qc->tag);
1704         dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1705 }
1706
1707 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1708 {
1709         struct nv_swncq_port_priv *pp = ap->private_data;
1710         struct defer_queue *dq = &pp->defer_queue;
1711         unsigned int tag;
1712
1713         if (dq->head == dq->tail)       /* empty queue */
1714                 return NULL;
1715
1716         tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1717         dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1718         WARN_ON(!(dq->defer_bits & (1 << tag)));
1719         dq->defer_bits &= ~(1 << tag);
1720
1721         return ata_qc_from_tag(ap, tag);
1722 }
1723
1724 static void nv_swncq_fis_reinit(struct ata_port *ap)
1725 {
1726         struct nv_swncq_port_priv *pp = ap->private_data;
1727
1728         pp->dhfis_bits = 0;
1729         pp->dmafis_bits = 0;
1730         pp->sdbfis_bits = 0;
1731         pp->ncq_flags = 0;
1732 }
1733
1734 static void nv_swncq_pp_reinit(struct ata_port *ap)
1735 {
1736         struct nv_swncq_port_priv *pp = ap->private_data;
1737         struct defer_queue *dq = &pp->defer_queue;
1738
1739         dq->head = 0;
1740         dq->tail = 0;
1741         dq->defer_bits = 0;
1742         pp->qc_active = 0;
1743         pp->last_issue_tag = ATA_TAG_POISON;
1744         nv_swncq_fis_reinit(ap);
1745 }
1746
1747 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1748 {
1749         struct nv_swncq_port_priv *pp = ap->private_data;
1750
1751         writew(fis, pp->irq_block);
1752 }
1753
1754 static void __ata_bmdma_stop(struct ata_port *ap)
1755 {
1756         struct ata_queued_cmd qc;
1757
1758         qc.ap = ap;
1759         ata_bmdma_stop(&qc);
1760 }
1761
1762 static void nv_swncq_ncq_stop(struct ata_port *ap)
1763 {
1764         struct nv_swncq_port_priv *pp = ap->private_data;
1765         unsigned int i;
1766         u32 sactive;
1767         u32 done_mask;
1768
1769         ata_port_printk(ap, KERN_ERR,
1770                         "EH in SWNCQ mode, QC: qc_active 0x%X sactive 0x%X\n",
1771                         ap->qc_active, ap->link.sactive);
1772         ata_port_printk(ap, KERN_ERR,
1773                 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1774                 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1775                 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1776                 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1777
1778         ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1779                         ap->ops->check_status(ap),
1780                         ioread8(ap->ioaddr.error_addr));
1781
1782         sactive = readl(pp->sactive_block);
1783         done_mask = pp->qc_active ^ sactive;
1784
1785         ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1786         for (i = 0; i < ATA_MAX_QUEUE; i++) {
1787                 u8 err = 0;
1788                 if (pp->qc_active & (1 << i))
1789                         err = 0;
1790                 else if (done_mask & (1 << i))
1791                         err = 1;
1792                 else
1793                         continue;
1794
1795                 ata_port_printk(ap, KERN_ERR,
1796                                 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1797                                 (pp->dhfis_bits >> i) & 0x1,
1798                                 (pp->dmafis_bits >> i) & 0x1,
1799                                 (pp->sdbfis_bits >> i) & 0x1,
1800                                 (sactive >> i) & 0x1,
1801                                 (err ? "error! tag doesn't exist" : " "));
1802         }
1803
1804         nv_swncq_pp_reinit(ap);
1805         ap->ops->irq_clear(ap);
1806         __ata_bmdma_stop(ap);
1807         nv_swncq_irq_clear(ap, 0xffff);
1808 }
1809
1810 static void nv_swncq_error_handler(struct ata_port *ap)
1811 {
1812         struct ata_eh_context *ehc = &ap->link.eh_context;
1813
1814         if (ap->link.sactive) {
1815                 nv_swncq_ncq_stop(ap);
1816                 ehc->i.action |= ATA_EH_HARDRESET;
1817         }
1818
1819         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1820                            nv_hardreset, ata_std_postreset);
1821 }
1822
1823 #ifdef CONFIG_PM
1824 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1825 {
1826         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1827         u32 tmp;
1828
1829         /* clear irq */
1830         writel(~0, mmio + NV_INT_STATUS_MCP55);
1831
1832         /* disable irq */
1833         writel(0, mmio + NV_INT_ENABLE_MCP55);
1834
1835         /* disable swncq */
1836         tmp = readl(mmio + NV_CTL_MCP55);
1837         tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1838         writel(tmp, mmio + NV_CTL_MCP55);
1839
1840         return 0;
1841 }
1842
1843 static int nv_swncq_port_resume(struct ata_port *ap)
1844 {
1845         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1846         u32 tmp;
1847
1848         /* clear irq */
1849         writel(~0, mmio + NV_INT_STATUS_MCP55);
1850
1851         /* enable irq */
1852         writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1853
1854         /* enable swncq */
1855         tmp = readl(mmio + NV_CTL_MCP55);
1856         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1857
1858         return 0;
1859 }
1860 #endif
1861
1862 static void nv_swncq_host_init(struct ata_host *host)
1863 {
1864         u32 tmp;
1865         void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1866         struct pci_dev *pdev = to_pci_dev(host->dev);
1867         u8 regval;
1868
1869         /* disable ECO 398 */
1870         pci_read_config_byte(pdev, 0x7f, &regval);
1871         regval &= ~(1 << 7);
1872         pci_write_config_byte(pdev, 0x7f, regval);
1873
1874         /* enable swncq */
1875         tmp = readl(mmio + NV_CTL_MCP55);
1876         VPRINTK("HOST_CTL:0x%X\n", tmp);
1877         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1878
1879         /* enable irq intr */
1880         tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1881         VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1882         writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1883
1884         /* clear port irq */
1885         writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1886 }
1887
1888 static int nv_swncq_slave_config(struct scsi_device *sdev)
1889 {
1890         struct ata_port *ap = ata_shost_to_port(sdev->host);
1891         struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1892         struct ata_device *dev;
1893         int rc;
1894         u8 rev;
1895         u8 check_maxtor = 0;
1896         unsigned char model_num[ATA_ID_PROD_LEN + 1];
1897
1898         rc = ata_scsi_slave_config(sdev);
1899         if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1900                 /* Not a proper libata device, ignore */
1901                 return rc;
1902
1903         dev = &ap->link.device[sdev->id];
1904         if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1905                 return rc;
1906
1907         /* if MCP51 and Maxtor, then disable ncq */
1908         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1909                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1910                 check_maxtor = 1;
1911
1912         /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1913         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1914                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1915                 pci_read_config_byte(pdev, 0x8, &rev);
1916                 if (rev <= 0xa2)
1917                         check_maxtor = 1;
1918         }
1919
1920         if (!check_maxtor)
1921                 return rc;
1922
1923         ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1924
1925         if (strncmp(model_num, "Maxtor", 6) == 0) {
1926                 ata_scsi_change_queue_depth(sdev, 1);
1927                 ata_dev_printk(dev, KERN_NOTICE,
1928                         "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
1929         }
1930
1931         return rc;
1932 }
1933
1934 static int nv_swncq_port_start(struct ata_port *ap)
1935 {
1936         struct device *dev = ap->host->dev;
1937         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1938         struct nv_swncq_port_priv *pp;
1939         int rc;
1940
1941         rc = ata_port_start(ap);
1942         if (rc)
1943                 return rc;
1944
1945         pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1946         if (!pp)
1947                 return -ENOMEM;
1948
1949         pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1950                                       &pp->prd_dma, GFP_KERNEL);
1951         if (!pp->prd)
1952                 return -ENOMEM;
1953         memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1954
1955         ap->private_data = pp;
1956         pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1957         pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1958         pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1959
1960         return 0;
1961 }
1962
1963 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1964 {
1965         if (qc->tf.protocol != ATA_PROT_NCQ) {
1966                 ata_qc_prep(qc);
1967                 return;
1968         }
1969
1970         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1971                 return;
1972
1973         nv_swncq_fill_sg(qc);
1974 }
1975
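/*
 * SWNCQ keeps one legacy BMDMA PRD table per tag.  As with other BMDMA
 * drivers, a PRD entry must not cross a 64KiB boundary, so oversized
 * segments are split at the boundary; a length field of zero means
 * 64KiB, which the "len & 0xffff" below produces naturally.
 */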
1976 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1977 {
1978         struct ata_port *ap = qc->ap;
1979         struct scatterlist *sg;
1980         unsigned int idx;
1981         struct nv_swncq_port_priv *pp = ap->private_data;
1982         struct ata_prd *prd;
1983
1984         WARN_ON(qc->__sg == NULL);
1985         WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1986
1987         prd = pp->prd + ATA_MAX_PRD * qc->tag;
1988
1989         idx = 0;
1990         ata_for_each_sg(sg, qc) {
1991                 u32 addr, offset;
1992                 u32 sg_len, len;
1993
1994                 addr = (u32)sg_dma_address(sg);
1995                 sg_len = sg_dma_len(sg);
1996
1997                 while (sg_len) {
1998                         offset = addr & 0xffff;
1999                         len = sg_len;
2000                         if ((offset + sg_len) > 0x10000)
2001                                 len = 0x10000 - offset;
2002
2003                         prd[idx].addr = cpu_to_le32(addr);
2004                         prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2005
2006                         idx++;
2007                         sg_len -= len;
2008                         addr += len;
2009                 }
2010         }
2011
2012         if (idx)
2013                 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2014 }
2015
2016 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2017                                           struct ata_queued_cmd *qc)
2018 {
2019         struct nv_swncq_port_priv *pp = ap->private_data;
2020
2021         if (qc == NULL)
2022                 return 0;
2023
2024         DPRINTK("Enter\n");
2025
2026         writel((1 << qc->tag), pp->sactive_block);
2027         pp->last_issue_tag = qc->tag;
2028         pp->dhfis_bits &= ~(1 << qc->tag);
2029         pp->dmafis_bits &= ~(1 << qc->tag);
2030         pp->qc_active |= (0x1 << qc->tag);
2031
2032         ap->ops->tf_load(ap, &qc->tf);   /* load tf registers */
2033         ap->ops->exec_command(ap, &qc->tf);
2034
2035         DPRINTK("Issued tag %u\n", qc->tag);
2036
2037         return 0;
2038 }
2039
2040 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2041 {
2042         struct ata_port *ap = qc->ap;
2043         struct nv_swncq_port_priv *pp = ap->private_data;
2044
2045         if (qc->tf.protocol != ATA_PROT_NCQ)
2046                 return ata_qc_issue_prot(qc);
2047
2048         DPRINTK("Enter\n");
2049
2050         if (!pp->qc_active)
2051                 nv_swncq_issue_atacmd(ap, qc);
2052         else
2053                 nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2054
2055         return 0;
2056 }
2057
2058 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2059 {
2060         u32 serror;
2061         struct ata_eh_info *ehi = &ap->link.eh_info;
2062
2063         ata_ehi_clear_desc(ehi);
2064
2065         /* the controller needs SError cleared; otherwise, it might lock up */
2066         sata_scr_read(&ap->link, SCR_ERROR, &serror);
2067         sata_scr_write(&ap->link, SCR_ERROR, serror);
2068
2069         /* analyze @fis */
2070         if (fis & NV_SWNCQ_IRQ_ADDED)
2071                 ata_ehi_push_desc(ehi, "hot plug");
2072         else if (fis & NV_SWNCQ_IRQ_REMOVED)
2073                 ata_ehi_push_desc(ehi, "hot unplug");
2074
2075         ata_ehi_hotplugged(ehi);
2076
2077         /* okay, let's hand over to EH */
2078         ehi->serror |= serror;
2079
2080         ata_port_freeze(ap);
2081 }
2082
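/*
 * Set Device Bits FIS handling: tags that were active in pp->qc_active
 * but have dropped out of SActive are complete, so done_mask is simply
 * the XOR of the two.  A tag present in SActive but not in qc_active
 * is an illegal transition and forces error handling.
 */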
2083 static int nv_swncq_sdbfis(struct ata_port *ap)
2084 {
2085         struct ata_queued_cmd *qc;
2086         struct nv_swncq_port_priv *pp = ap->private_data;
2087         struct ata_eh_info *ehi = &ap->link.eh_info;
2088         u32 sactive;
2089         int nr_done = 0;
2090         u32 done_mask;
2091         int i;
2092         u8 host_stat;
2093         u8 lack_dhfis = 0;
2094
2095         host_stat = ap->ops->bmdma_status(ap);
2096         if (unlikely(host_stat & ATA_DMA_ERR)) {
2097                 /* error when transferring data to/from memory */
2098                 ata_ehi_clear_desc(ehi);
2099                 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2100                 ehi->err_mask |= AC_ERR_HOST_BUS;
2101                 ehi->action |= ATA_EH_SOFTRESET;
2102                 return -EINVAL;
2103         }
2104
2105         ap->ops->irq_clear(ap);
2106         __ata_bmdma_stop(ap);
2107
2108         sactive = readl(pp->sactive_block);
2109         done_mask = pp->qc_active ^ sactive;
2110
2111         if (unlikely(done_mask & sactive)) {
2112                 ata_ehi_clear_desc(ehi);
2113                 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
2114                                   " (%08x->%08x)", pp->qc_active, sactive);
2115                 ehi->err_mask |= AC_ERR_HSM;
2116                 ehi->action |= ATA_EH_HARDRESET;
2117                 return -EINVAL;
2118         }
2119         for (i = 0; i < ATA_MAX_QUEUE; i++) {
2120                 if (!(done_mask & (1 << i)))
2121                         continue;
2122
2123                 qc = ata_qc_from_tag(ap, i);
2124                 if (qc) {
2125                         ata_qc_complete(qc);
2126                         pp->qc_active &= ~(1 << i);
2127                         pp->dhfis_bits &= ~(1 << i);
2128                         pp->dmafis_bits &= ~(1 << i);
2129                         pp->sdbfis_bits |= (1 << i);
2130                         nr_done++;
2131                 }
2132         }
2133
2134         if (!ap->qc_active) {
2135                 DPRINTK("over\n");
2136                 nv_swncq_pp_reinit(ap);
2137                 return nr_done;
2138         }
2139
2140         if (pp->qc_active & pp->dhfis_bits)
2141                 return nr_done;
2142
2143         if ((pp->ncq_flags & ncq_saw_backout) ||
2144             (pp->qc_active ^ pp->dhfis_bits))
2145                 /* if the controller can't get a Device-to-Host Register FIS,
2146                  * the driver needs to reissue the command.
2147                  */
2148                 lack_dhfis = 1;
2149
2150         DPRINTK("id 0x%x QC: qc_active 0x%x, "
2151                 "SWNCQ:qc_active 0x%X defer_bits %X "
2152                 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2153                 ap->print_id, ap->qc_active, pp->qc_active,
2154                 pp->defer_queue.defer_bits, pp->dhfis_bits,
2155                 pp->dmafis_bits, pp->last_issue_tag);
2156
2157         nv_swncq_fis_reinit(ap);
2158
2159         if (lack_dhfis) {
2160                 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2161                 nv_swncq_issue_atacmd(ap, qc);
2162                 return nr_done;
2163         }
2164
2165         if (pp->defer_queue.defer_bits) {
2166                 /* send deferral queue command */
2167                 qc = nv_swncq_qc_from_dq(ap);
2168                 WARN_ON(qc == NULL);
2169                 nv_swncq_issue_atacmd(ap, qc);
2170         }
2171
2172         return nr_done;
2173 }
2174
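/*
 * The tag of the most recent DMA Setup FIS is read back from the
 * per-port tag_block register; the shift and mask below assume the
 * 5-bit tag occupies bits 6:2 of that register.
 */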
2175 static inline u32 nv_swncq_tag(struct ata_port *ap)
2176 {
2177         struct nv_swncq_port_priv *pp = ap->private_data;
2178         u32 tag;
2179
2180         tag = readb(pp->tag_block) >> 2;
2181         return (tag & 0x1f);
2182 }
2183
2184 static int nv_swncq_dmafis(struct ata_port *ap)
2185 {
2186         struct ata_queued_cmd *qc;
2187         unsigned int rw;
2188         u8 dmactl;
2189         u32 tag;
2190         struct nv_swncq_port_priv *pp = ap->private_data;
2191
2192         __ata_bmdma_stop(ap);
2193         tag = nv_swncq_tag(ap);
2194
2195         DPRINTK("dma setup tag 0x%x\n", tag);
2196         qc = ata_qc_from_tag(ap, tag);
2197
2198         if (unlikely(!qc))
2199                 return 0;
2200
2201         rw = qc->tf.flags & ATA_TFLAG_WRITE;
2202
2203         /* load PRD table addr. */
2204         iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2205                   ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2206
2207         /* specify data direction, triple-check start bit is clear */
2208         dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2209         dmactl &= ~ATA_DMA_WR;
2210         if (!rw)
2211                 dmactl |= ATA_DMA_WR;
2212
2213         iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2214
2215         return 1;
2216 }
2217
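/*
 * Per-port SWNCQ interrupt processing, driven by which FIS types the
 * status word reports: a D2H Register FIS (DHREGFIS) means the last
 * issued command was accepted, a DMA Setup FIS (DMASETUP) means the
 * BMDMA engine should be programmed for that tag, and a Set Device
 * Bits FIS (SDBFIS) delivers completions.  Backout IRQs are only noted
 * in ncq_flags so the affected command can be reissued later.
 */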
2218 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2219 {
2220         struct nv_swncq_port_priv *pp = ap->private_data;
2221         struct ata_queued_cmd *qc;
2222         struct ata_eh_info *ehi = &ap->link.eh_info;
2223         u32 serror;
2224         u8 ata_stat;
2225         int rc = 0;
2226
2227         ata_stat = ap->ops->check_status(ap);
2228         nv_swncq_irq_clear(ap, fis);
2229         if (!fis)
2230                 return;
2231
2232         if (ap->pflags & ATA_PFLAG_FROZEN)
2233                 return;
2234
2235         if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2236                 nv_swncq_hotplug(ap, fis);
2237                 return;
2238         }
2239
2240         if (!pp->qc_active)
2241                 return;
2242
2243         if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2244                 return;
2245         ap->ops->scr_write(ap, SCR_ERROR, serror);
2246
2247         if (ata_stat & ATA_ERR) {
2248                 ata_ehi_clear_desc(ehi);
2249                 ata_ehi_push_desc(ehi, "ATA error, fis:0x%X", fis);
2250                 ehi->err_mask |= AC_ERR_DEV;
2251                 ehi->serror |= serror;
2252                 ehi->action |= ATA_EH_SOFTRESET;
2253                 ata_port_freeze(ap);
2254                 return;
2255         }
2256
2257         if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2258                 /* On a backout IRQ, the driver must reissue
2259                  * the command again some time later.
2260                  */
2261                 pp->ncq_flags |= ncq_saw_backout;
2262         }
2263
2264         if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2265                 pp->ncq_flags |= ncq_saw_sdb;
2266                 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2267                         "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2268                         ap->print_id, pp->qc_active, pp->dhfis_bits,
2269                         pp->dmafis_bits, readl(pp->sactive_block));
2270                 rc = nv_swncq_sdbfis(ap);
2271                 if (rc < 0)
2272                         goto irq_error;
2273         }
2274
2275         if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2276                 /* The interrupt indicates the new command
2277                  * was transmitted correctly to the drive.
2278                  */
2279                 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2280                 pp->ncq_flags |= ncq_saw_d2h;
2281                 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2282                         ata_ehi_push_desc(ehi, "illegal fis transaction");
2283                         ehi->err_mask |= AC_ERR_HSM;
2284                         ehi->action |= ATA_EH_HARDRESET;
2285                         goto irq_error;
2286                 }
2287
2288                 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2289                     !(pp->ncq_flags & ncq_saw_dmas)) {
2290                         ata_stat = ap->ops->check_status(ap);
2291                         if (ata_stat & ATA_BUSY)
2292                                 goto irq_exit;
2293
2294                         if (pp->defer_queue.defer_bits) {
2295                                 DPRINTK("send next command\n");
2296                                 qc = nv_swncq_qc_from_dq(ap);
2297                                 nv_swncq_issue_atacmd(ap, qc);
2298                         }
2299                 }
2300         }
2301
2302         if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2303                 /* program the dma controller with appropriate PRD buffers
2304                  * and start the DMA transfer for the requested command.
2305                  */
2306                 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2307                 pp->ncq_flags |= ncq_saw_dmas;
2308                 rc = nv_swncq_dmafis(ap);
2309         }
2310
2311 irq_exit:
2312         return;
2313 irq_error:
2314         ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2315         ata_port_freeze(ap);
2316         return;
2317 }
2318
2319 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2320 {
2321         struct ata_host *host = dev_instance;
2322         unsigned int i;
2323         unsigned int handled = 0;
2324         unsigned long flags;
2325         u32 irq_stat;
2326
2327         spin_lock_irqsave(&host->lock, flags);
2328
2329         irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2330
2331         for (i = 0; i < host->n_ports; i++) {
2332                 struct ata_port *ap = host->ports[i];
2333
2334                 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2335                         if (ap->link.sactive) {
2336                                 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2337                                 handled = 1;
2338                         } else {
2339                                 if (irq_stat)   /* preserve hotplug bits */
2340                                         nv_swncq_irq_clear(ap, 0xfff0);
2341
2342                                 handled += nv_host_intr(ap, (u8)irq_stat);
2343                         }
2344                 }
2345                 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2346         }
2347
2348         spin_unlock_irqrestore(&host->lock, flags);
2349
2350         return IRQ_RETVAL(handled);
2351 }
2352
2353 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2354 {
2355         static int printed_version;
2356         const struct ata_port_info *ppi[] = { NULL, NULL };
2357         struct ata_host *host;
2358         struct nv_host_priv *hpriv;
2359         int rc;
2360         u32 bar;
2361         void __iomem *base;
2362         unsigned long type = ent->driver_data;
2363
2364         // Make sure this is a SATA controller by counting the number of bars
2365         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2366         // it's an IDE controller and we ignore it.
2367         for (bar = 0; bar < 6; bar++)
2368                 if (pci_resource_start(pdev, bar) == 0)
2369                         return -ENODEV;
2370
2371         if (!printed_version++)
2372                 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2373
2374         rc = pcim_enable_device(pdev);
2375         if (rc)
2376                 return rc;
2377
2378         /* determine type and allocate host */
2379         if (type == CK804 && adma_enabled) {
2380                 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2381                 type = ADMA;
2382         }
2383
2384         ppi[0] = &nv_port_info[type];
2385         rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
2386         if (rc)
2387                 return rc;
2388
2389         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2390         if (!hpriv)
2391                 return -ENOMEM;
2392         hpriv->type = type;
2393         host->private_data = hpriv;
2394
2395         /* set 64bit dma masks, may fail */
2396         if (type == ADMA) {
2397                 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
2398                         pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2399         }
2400
2401         /* request and iomap NV_MMIO_BAR */
2402         rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2403         if (rc)
2404                 return rc;
2405
2406         /* configure SCR access */
2407         base = host->iomap[NV_MMIO_BAR];
2408         host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2409         host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2410
2411         /* enable SATA space for CK804 */
2412         if (type >= CK804) {
2413                 u8 regval;
2414
2415                 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2416                 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2417                 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2418         }
2419
2420         /* init ADMA */
2421         if (type == ADMA) {
2422                 rc = nv_adma_host_init(host);
2423                 if (rc)
2424                         return rc;
2425         } else if (type == SWNCQ && swncq_enabled) {
2426                 dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
2427                 nv_swncq_host_init(host);
2428         }
2429
2430         pci_set_master(pdev);
2431         return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
2432                                  IRQF_SHARED, ppi[0]->sht);
2433 }
2434
2435 #ifdef CONFIG_PM
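/*
 * On resume from a full suspend, the PCI config-space setup done at
 * probe time is lost, so the SATA space enable bit (CK804 and later)
 * and the per-port ADMA enables are reapplied here; ports that were
 * dropped to legacy operation for ATAPI stay disabled.
 */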
2436 static int nv_pci_device_resume(struct pci_dev *pdev)
2437 {
2438         struct ata_host *host = dev_get_drvdata(&pdev->dev);
2439         struct nv_host_priv *hpriv = host->private_data;
2440         int rc;
2441
2442         rc = ata_pci_device_do_resume(pdev);
2443         if (rc)
2444                 return rc;
2445
2446         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2447                 if (hpriv->type >= CK804) {
2448                         u8 regval;
2449
2450                         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2451                         regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2452                         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2453                 }
2454                 if (hpriv->type == ADMA) {
2455                         u32 tmp32;
2456                         struct nv_adma_port_priv *pp;
2457                         /* enable/disable ADMA on the ports appropriately */
2458                         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2459
2460                         pp = host->ports[0]->private_data;
2461                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2462                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2463                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2464                         else
2465                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2466                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2467                         pp = host->ports[1]->private_data;
2468                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2469                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2470                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2471                         else
2472                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2473                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2474
2475                         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2476                 }
2477         }
2478
2479         ata_host_resume(host);
2480
2481         return 0;
2482 }
2483 #endif
2484
2485 static void nv_ck804_host_stop(struct ata_host *host)
2486 {
2487         struct pci_dev *pdev = to_pci_dev(host->dev);
2488         u8 regval;
2489
2490         /* disable SATA space for CK804 */
2491         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2492         regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2493         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2494 }
2495
2496 static void nv_adma_host_stop(struct ata_host *host)
2497 {
2498         struct pci_dev *pdev = to_pci_dev(host->dev);
2499         u32 tmp32;
2500
2501         /* disable ADMA on the ports */
2502         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2503         tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2504                    NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2505                    NV_MCP_SATA_CFG_20_PORT1_EN |
2506                    NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2507
2508         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2509
2510         nv_ck804_host_stop(host);
2511 }
2512
2513 static int __init nv_init(void)
2514 {
2515         return pci_register_driver(&nv_pci_driver);
2516 }
2517
2518 static void __exit nv_exit(void)
2519 {
2520         pci_unregister_driver(&nv_pci_driver);
2521 }
2522
2523 module_init(nv_init);
2524 module_exit(nv_exit);
2525 module_param_named(adma, adma_enabled, bool, 0444);
2526 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2527 module_param_named(swncq, swncq_enabled, bool, 0444);
2528 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
2529