#include <linux/libata.h>
#define DRV_NAME "sata_nv"
-#define DRV_VERSION "3.4"
+#define DRV_VERSION "3.5"
#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
NV_ADMA_STAT_STOPPED = (1 << 10),
NV_ADMA_STAT_DONE = (1 << 12),
NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
- NV_ADMA_STAT_TIMEOUT,
+ NV_ADMA_STAT_TIMEOUT,
/* port flags */
NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
+ /* MCP55 register offsets */
+ NV_CTL_MCP55 = 0x400,
+ NV_INT_STATUS_MCP55 = 0x440,
+ NV_INT_ENABLE_MCP55 = 0x444,
+ NV_NCQ_REG_MCP55 = 0x448,
+
+ /* MCP55 interrupt bits */
+ NV_INT_ALL_MCP55 = 0xffff,
+ NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
+ NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
+
+ /* SWNCQ enable bits */
+ NV_CTL_PRI_SWNCQ = 0x02,
+ NV_CTL_SEC_SWNCQ = 0x04,
+
+ /* SWNCQ status bits */
+ NV_SWNCQ_IRQ_DEV = (1 << 0),
+ NV_SWNCQ_IRQ_PM = (1 << 1),
+ NV_SWNCQ_IRQ_ADDED = (1 << 2),
+ NV_SWNCQ_IRQ_REMOVED = (1 << 3),
+
+ NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
+ NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
+ NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
+ NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
+
+ NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
+ NV_SWNCQ_IRQ_REMOVED,
+
};
/* ADMA Physical Region Descriptor - one SG segment */
u8 reserved1; /* 1 */
u8 ctl_flags; /* 2 */
/* len is length of taskfile in 64 bit words */
- u8 len; /* 3 */
+ u8 len; /* 3 */
u8 tag; /* 4 */
u8 next_cpb_idx; /* 5 */
__le16 reserved2; /* 6-7 */
dma_addr_t cpb_dma;
struct nv_adma_prd *aprd;
dma_addr_t aprd_dma;
- void __iomem * ctl_block;
- void __iomem * gen_block;
- void __iomem * notifier_clear_block;
+ void __iomem *ctl_block;
+ void __iomem *gen_block;
+ void __iomem *notifier_clear_block;
u8 flags;
int last_issue_ncq;
};
unsigned long type;
};
-#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
+struct defer_queue {
+ u32 defer_bits;
+ unsigned int head;
+ unsigned int tail;
+ unsigned int tag[ATA_MAX_QUEUE];
+};
+
+enum ncq_saw_flag_list {
+ ncq_saw_d2h = (1U << 0),
+ ncq_saw_dmas = (1U << 1),
+ ncq_saw_sdb = (1U << 2),
+ ncq_saw_backout = (1U << 3),
+};
+
+struct nv_swncq_port_priv {
+ struct ata_prd *prd; /* our SG list */
+ dma_addr_t prd_dma; /* and its DMA mapping */
+ void __iomem *sactive_block;
+ void __iomem *irq_block;
+ void __iomem *tag_block;
+ u32 qc_active;
+
+ unsigned int last_issue_tag;
+
+ /* FIFO circular queue to store deferred commands */
+ struct defer_queue defer_queue;
-static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+ /* for NCQ interrupt analysis */
+ u32 dhfis_bits;
+ u32 dmafis_bits;
+ u32 sdbfis_bits;
+
+ unsigned int ncq_flags;
+};
+
+
+#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
+
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
-static int nv_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+static void nv_mcp55_thaw(struct ata_port *ap);
+static void nv_mcp55_freeze(struct ata_port *ap);
+static void nv_swncq_error_handler(struct ata_port *ap);
+static int nv_swncq_slave_config(struct scsi_device *sdev);
+static int nv_swncq_port_start(struct ata_port *ap);
+static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
+static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
+static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
+static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
+static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
+#ifdef CONFIG_PM
+static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
+static int nv_swncq_port_resume(struct ata_port *ap);
+#endif
+
enum nv_host_type
{
GENERIC,
NFORCE2,
NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
CK804,
- ADMA
+ ADMA,
+ SWNCQ,
};
static const struct pci_device_id nv_pci_tbl[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
- { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
+ { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
.bios_param = ata_std_bios_param,
};
+static struct scsi_host_template nv_swncq_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .ioctl = ata_scsi_ioctl,
+ .queuecommand = ata_scsi_queuecmd,
+ .change_queue_depth = ata_scsi_change_queue_depth,
+ .can_queue = ATA_MAX_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = LIBATA_MAX_PRD,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = ATA_SHT_USE_CLUSTERING,
+ .proc_name = DRV_NAME,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = nv_swncq_slave_config,
+ .slave_destroy = ata_scsi_slave_destroy,
+ .bios_param = ata_std_bios_param,
+};
+
static const struct ata_port_operations nv_generic_ops = {
- .port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.exec_command = ata_exec_command,
.data_xfer = ata_data_xfer,
.irq_clear = ata_bmdma_irq_clear,
.irq_on = ata_irq_on,
- .irq_ack = ata_irq_ack,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
.port_start = ata_port_start,
};
static const struct ata_port_operations nv_nf2_ops = {
- .port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.exec_command = ata_exec_command,
.data_xfer = ata_data_xfer,
.irq_clear = ata_bmdma_irq_clear,
.irq_on = ata_irq_on,
- .irq_ack = ata_irq_ack,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
.port_start = ata_port_start,
};
static const struct ata_port_operations nv_ck804_ops = {
- .port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.exec_command = ata_exec_command,
.data_xfer = ata_data_xfer,
.irq_clear = ata_bmdma_irq_clear,
.irq_on = ata_irq_on,
- .irq_ack = ata_irq_ack,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
.port_start = ata_port_start,
};
static const struct ata_port_operations nv_adma_ops = {
- .port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = nv_adma_tf_read,
.check_atapi_dma = nv_adma_check_atapi_dma,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
+ .qc_defer = ata_std_qc_defer,
.qc_prep = nv_adma_qc_prep,
.qc_issue = nv_adma_qc_issue,
.freeze = nv_adma_freeze,
.data_xfer = ata_data_xfer,
.irq_clear = nv_adma_irq_clear,
.irq_on = ata_irq_on,
- .irq_ack = ata_irq_ack,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
.port_start = nv_adma_port_start,
.host_stop = nv_adma_host_stop,
};
+static const struct ata_port_operations nv_swncq_ops = {
+ .tf_load = ata_tf_load,
+ .tf_read = ata_tf_read,
+ .exec_command = ata_exec_command,
+ .check_status = ata_check_status,
+ .dev_select = ata_std_dev_select,
+ .bmdma_setup = ata_bmdma_setup,
+ .bmdma_start = ata_bmdma_start,
+ .bmdma_stop = ata_bmdma_stop,
+ .bmdma_status = ata_bmdma_status,
+ .qc_defer = ata_std_qc_defer,
+ .qc_prep = nv_swncq_qc_prep,
+ .qc_issue = nv_swncq_qc_issue,
+ .freeze = nv_mcp55_freeze,
+ .thaw = nv_mcp55_thaw,
+ .error_handler = nv_swncq_error_handler,
+ .post_internal_cmd = ata_bmdma_post_internal_cmd,
+ .data_xfer = ata_data_xfer,
+ .irq_clear = ata_bmdma_irq_clear,
+ .irq_on = ata_irq_on,
+ .scr_read = nv_scr_read,
+ .scr_write = nv_scr_write,
+#ifdef CONFIG_PM
+ .port_suspend = nv_swncq_port_suspend,
+ .port_resume = nv_swncq_port_resume,
+#endif
+ .port_start = nv_swncq_port_start,
+};
+
static const struct ata_port_info nv_port_info[] = {
/* generic */
{
.sht = &nv_sht,
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_HRST_TO_RESUME,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .link_flags = ATA_LFLAG_HRST_TO_RESUME,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
/* nforce2/3 */
{
.sht = &nv_sht,
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_HRST_TO_RESUME,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .link_flags = ATA_LFLAG_HRST_TO_RESUME,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
/* ck804 */
{
.sht = &nv_sht,
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_HRST_TO_RESUME,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .link_flags = ATA_LFLAG_HRST_TO_RESUME,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
{
.sht = &nv_adma_sht,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_HRST_TO_RESUME |
ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+ .link_flags = ATA_LFLAG_HRST_TO_RESUME,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
.port_ops = &nv_adma_ops,
.irq_handler = nv_adma_interrupt,
},
+ /* SWNCQ */
+ {
+ .sht = &nv_swncq_sht,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+ ATA_FLAG_NCQ,
+ .link_flags = ATA_LFLAG_HRST_TO_RESUME,
+ .pio_mask = NV_PIO_MASK,
+ .mwdma_mask = NV_MWDMA_MASK,
+ .udma_mask = NV_UDMA_MASK,
+ .port_ops = &nv_swncq_ops,
+ .irq_handler = nv_swncq_interrupt,
+ },
};
MODULE_AUTHOR("NVIDIA");
MODULE_VERSION(DRV_VERSION);
static int adma_enabled = 1;
+static int swncq_enabled;
static void nv_adma_register_mode(struct ata_port *ap)
{
return;
status = readw(mmio + NV_ADMA_STAT);
- while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
+ while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
ndelay(50);
status = readw(mmio + NV_ADMA_STAT);
count++;
}
- if(count == 20)
+ if (count == 20)
ata_port_printk(ap, KERN_WARNING,
"timeout waiting for ADMA IDLE, stat=0x%hx\n",
status);
count = 0;
status = readw(mmio + NV_ADMA_STAT);
- while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
+ while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
ndelay(50);
status = readw(mmio + NV_ADMA_STAT);
count++;
}
- if(count == 20)
+ if (count == 20)
ata_port_printk(ap, KERN_WARNING,
"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
status);
writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
status = readw(mmio + NV_ADMA_STAT);
- while(((status & NV_ADMA_STAT_LEGACY) ||
+ while (((status & NV_ADMA_STAT_LEGACY) ||
!(status & NV_ADMA_STAT_IDLE)) && count < 20) {
ndelay(50);
status = readw(mmio + NV_ADMA_STAT);
count++;
}
- if(count == 20)
+ if (count == 20)
ata_port_printk(ap, KERN_WARNING,
"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
status);
/* Not a proper libata device, ignore */
return rc;
- if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
+ if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
/*
* NVIDIA reports that ADMA mode does not support ATAPI commands.
* Therefore ATAPI commands are sent through the legacy interface.
on the port. */
adma_enable = 0;
nv_adma_register_mode(ap);
- }
- else {
+ } else {
bounce_limit = *ap->dev->dma_mask;
segment_boundary = NV_ADMA_DMA_BOUNDARY;
sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg);
- if(ap->port_no == 1)
+ if (ap->port_no == 1)
config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
else
config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
- if(adma_enable) {
+ if (adma_enable) {
new_reg = current_reg | config_mask;
pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
- }
- else {
+ } else {
new_reg = current_reg & ~config_mask;
pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
}
- if(current_reg != new_reg)
+ if (current_reg != new_reg)
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
{
unsigned int idx = 0;
- if(tf->flags & ATA_TFLAG_ISADDR) {
+ if (tf->flags & ATA_TFLAG_ISADDR) {
if (tf->flags & ATA_TFLAG_LBA48) {
cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
}
- if(tf->flags & ATA_TFLAG_DEVICE)
+ if (tf->flags & ATA_TFLAG_DEVICE)
cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
- while(idx < 12)
+ while (idx < 12)
cpb[idx++] = cpu_to_le16(IGN);
return idx;
flags & (NV_CPB_RESP_ATA_ERR |
NV_CPB_RESP_CMD_ERR |
NV_CPB_RESP_CPB_ERR)))) {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
int freeze = 0;
ata_ehi_clear_desc(ehi);
- __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags );
+ __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
if (flags & NV_CPB_RESP_ATA_ERR) {
ata_ehi_push_desc(ehi, "ATA error");
ehi->err_mask |= AC_ERR_DEV;
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
VPRINTK("CPB flags done, flags=0x%x\n", flags);
if (likely(qc)) {
- DPRINTK("Completing qc from tag %d\n",cpb_num);
+ DPRINTK("Completing qc from tag %d\n", cpb_num);
ata_qc_complete(qc);
} else {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
/* Notifier bits set without a command may indicate the drive
is misbehaving. Raise host state machine violation on this
condition. */
- ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
- cpb_num);
+ ata_port_printk(ap, KERN_ERR,
+ "notifier for tag %d with no cmd?\n",
+ cpb_num);
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_SOFTRESET;
ata_port_freeze(ap);
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* freeze if hotplugged */
if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
>> (NV_INT_PORT_SHIFT * i);
- if(ata_tag_valid(ap->active_tag))
+ if (ata_tag_valid(ap->link.active_tag))
/** NV_INT_DEV indication seems unreliable at times
at least in ADMA mode. Force it on always when a
command is active, to prevent losing interrupts. */
gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
- if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+ if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
!notifier_error)
/* Nothing to do */
continue;
NV_ADMA_STAT_HOTUNPLUG |
NV_ADMA_STAT_TIMEOUT |
NV_ADMA_STAT_SERROR))) {
- struct ata_eh_info *ehi = &ap->eh_info;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
ata_ehi_clear_desc(ehi);
- __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status );
+ __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
if (status & NV_ADMA_STAT_TIMEOUT) {
ehi->err_mask |= AC_ERR_SYSTEM;
ata_ehi_push_desc(ehi, "timeout");
u32 check_commands;
int pos, error = 0;
- if(ata_tag_valid(ap->active_tag))
- check_commands = 1 << ap->active_tag;
+ if (ata_tag_valid(ap->link.active_tag))
+ check_commands = 1 << ap->link.active_tag;
else
- check_commands = ap->sactive;
+ check_commands = ap->link.sactive;
/** Check CPBs for completed commands */
while ((pos = ffs(check_commands)) && !error) {
pos--;
error = nv_adma_check_cpb(ap, pos,
- notifier_error & (1 << pos) );
- check_commands &= ~(1 << pos );
+ notifier_error & (1 << pos));
+ check_commands &= ~(1 << pos);
}
}
}
}
- if(notifier_clears[0] || notifier_clears[1]) {
+ if (notifier_clears[0] || notifier_clears[1]) {
/* Note: Both notifier clear registers must be written
if either is set, even if one is zero, according to NVIDIA. */
struct nv_adma_port_priv *pp = host->ports[0]->private_data;
return;
/* clear any outstanding CK804 notifications */
- writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+ writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
/* Disable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
- writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+ writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
static void nv_adma_thaw(struct ata_port *ap)
/* Enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
- writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
+ writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
static void nv_adma_irq_clear(struct ata_port *ap)
}
/* clear any outstanding CK804 notifications */
- writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
+ writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
/* clear ADMA status */
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
- if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+ if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
ata_bmdma_post_internal_cmd(qc);
}
pp->cpb_dma = mem_dma;
writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
- writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
+ writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
- writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
- NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+ writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+ NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
return 0;
}
/* set CPB block location */
writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
- writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
+ writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
/* clear any outstanding interrupt conditions */
writew(0xffff, mmio + NV_ADMA_STAT);
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
- writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
- NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+ writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+ NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
return 0;
}
idx = 0;
ata_for_each_sg(sg, qc) {
- aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
+ aprd = (idx < 5) ? &cpb->aprd[idx] :
+ &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
nv_adma_fill_aprd(qc, sg, idx, aprd);
idx++;
}
/* ADMA engine can only be used for non-ATAPI DMA commands,
or interrupt-driven no-data commands, where a result taskfile
is not required. */
- if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+ if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(qc->tf.flags & ATA_TFLAG_POLLING) ||
(qc->flags & ATA_QCFLAG_RESULT_TF))
return 1;
- if((qc->flags & ATA_QCFLAG_DMAMAP) ||
+ if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
(qc->tf.protocol == ATA_PROT_NODATA))
return 0;
nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
- if(qc->flags & ATA_QCFLAG_DMAMAP) {
+ if (qc->flags & ATA_QCFLAG_DMAMAP) {
nv_adma_fill_sg(qc, cpb);
ctl_flags |= NV_CPB_CTL_APRD_VALID;
} else
memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
- /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
- finished filling in all of the contents */
+ /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
+ until we are finished filling in all of the contents */
wmb();
cpb->ctl_flags = ctl_flags;
wmb();
and (number of cpbs to append -1) in top 8 bits */
wmb();
- if(curr_ncq != pp->last_issue_ncq) {
- /* Seems to need some delay before switching between NCQ and non-NCQ
- commands, else we get command timeouts and such. */
+ if (curr_ncq != pp->last_issue_ncq) {
+ /* Seems to need some delay before switching between NCQ and
+ non-NCQ commands, else we get command timeouts and such. */
udelay(20);
pp->last_issue_ncq = curr_ncq;
}
writew(qc->tag, mmio + NV_ADMA_APPEND);
- DPRINTK("Issued tag %u\n",qc->tag);
+ DPRINTK("Issued tag %u\n", qc->tag);
return 0;
}
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
- qc = ata_qc_from_tag(ap, ap->active_tag);
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
handled += ata_host_intr(ap, qc);
else
writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
-static int nv_hardreset(struct ata_port *ap, unsigned int *class,
+static void nv_mcp55_freeze(struct ata_port *ap)
+{
+ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+ int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
+ u32 mask;
+
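+ /* ack any pending interrupts for this port, then mask all of
+ * its interrupt sources */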
+ writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
+
+ mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
+ mask &= ~(NV_INT_ALL_MCP55 << shift);
+ writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+ ata_bmdma_freeze(ap);
+}
+
+static void nv_mcp55_thaw(struct ata_port *ap)
+{
+ void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
+ int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
+ u32 mask;
+
+ writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
+
+ mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
+ mask |= (NV_INT_MASK_MCP55 << shift);
+ writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
+ ata_bmdma_thaw(ap);
+}
+
+static int nv_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
unsigned int dummy;
* some controllers. Don't classify on hardreset. For more
* info, see http://bugme.osdl.org/show_bug.cgi?id=3352
*/
- return sata_std_hardreset(ap, &dummy, deadline);
+ return sata_std_hardreset(link, &dummy, deadline);
}
static void nv_error_handler(struct ata_port *ap)
static void nv_adma_error_handler(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
- if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+ if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
void __iomem *mmio = pp->ctl_block;
int i;
u16 tmp;
- if(ata_tag_valid(ap->active_tag) || ap->sactive) {
+ if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
- ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
+ ata_port_printk(ap, KERN_ERR,
+ "EH in ADMA mode, notifier 0x%X "
"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
"next cpb count 0x%X next cpb idx 0x%x\n",
notifier, notifier_error, gen_ctl, status,
cpb_count, next_cpb_idx);
- for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
+ for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
struct nv_adma_cpb *cpb = &pp->cpb[i];
- if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
- ap->sactive & (1 << i) )
+ if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
+ ap->link.sactive & (1 << i))
ata_port_printk(ap, KERN_ERR,
"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
i, cpb->ctl_flags, cpb->resp_flags);
/* Push us back into port register mode for error handling. */
nv_adma_register_mode(ap);
- /* Mark all of the CPBs as invalid to prevent them from being executed */
- for( i=0;i<NV_ADMA_MAX_CPBS;i++)
+ /* Mark all of the CPBs as invalid to prevent them from
+ being executed */
+ for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
/* clear CPB fetch count */
/* Reset channel */
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
+ }
+
+ ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
+ nv_hardreset, ata_std_postreset);
+}
+
+static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct defer_queue *dq = &pp->defer_queue;
+
+ /* queue is full */
+ WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
+ dq->defer_bits |= (1 << qc->tag);
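+ /* ATA_MAX_QUEUE is a power of two, so masking the free-running
+ * tail index wraps it around the circular tag array */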
+ dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
+}
+
+static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct defer_queue *dq = &pp->defer_queue;
+ unsigned int tag;
+
+ if (dq->head == dq->tail) /* null queue */
+ return NULL;
+
+ tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
+ dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
+ WARN_ON(!(dq->defer_bits & (1 << tag)));
+ dq->defer_bits &= ~(1 << tag);
+
+ return ata_qc_from_tag(ap, tag);
+}
+
+static void nv_swncq_fis_reinit(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ pp->dhfis_bits = 0;
+ pp->dmafis_bits = 0;
+ pp->sdbfis_bits = 0;
+ pp->ncq_flags = 0;
+}
+
+static void nv_swncq_pp_reinit(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct defer_queue *dq = &pp->defer_queue;
+
+ dq->head = 0;
+ dq->tail = 0;
+ dq->defer_bits = 0;
+ pp->qc_active = 0;
+ pp->last_issue_tag = ATA_TAG_POISON;
+ nv_swncq_fis_reinit(ap);
+}
+
+static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ writew(fis, pp->irq_block);
+}
+
+static void __ata_bmdma_stop(struct ata_port *ap)
+{
+ struct ata_queued_cmd qc;
+
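+ /* ata_bmdma_stop() only dereferences qc->ap, so a dummy qc on
+ * the stack is enough to stop this port's DMA engine */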
+ qc.ap = ap;
+ ata_bmdma_stop(&qc);
+}
+
+static void nv_swncq_ncq_stop(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ unsigned int i;
+ u32 sactive;
+ u32 done_mask;
+
+ ata_port_printk(ap, KERN_ERR,
+ "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
+ ap->qc_active, ap->link.sactive);
+ ata_port_printk(ap, KERN_ERR,
+ "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
+ "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
+ pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
+ pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
+
+ ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
+ ap->ops->check_status(ap),
+ ioread8(ap->ioaddr.error_addr));
+
+ sactive = readl(pp->sactive_block);
+ done_mask = pp->qc_active ^ sactive;
+
+ ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
+ for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ u8 err = 0;
+ if (pp->qc_active & (1 << i))
+ err = 0;
+ else if (done_mask & (1 << i))
+ err = 1;
+ else
+ continue;
+
+ ata_port_printk(ap, KERN_ERR,
+ "tag 0x%x: %01x %01x %01x %01x %s\n", i,
+ (pp->dhfis_bits >> i) & 0x1,
+ (pp->dmafis_bits >> i) & 0x1,
+ (pp->sdbfis_bits >> i) & 0x1,
+ (sactive >> i) & 0x1,
+ (err ? "error! tag doesn't exit" : " "));
+ }
+
+ nv_swncq_pp_reinit(ap);
+ ap->ops->irq_clear(ap);
+ __ata_bmdma_stop(ap);
+ nv_swncq_irq_clear(ap, 0xffff);
+}
+
+static void nv_swncq_error_handler(struct ata_port *ap)
+{
+ struct ata_eh_context *ehc = &ap->link.eh_context;
+
+ if (ap->link.sactive) {
+ nv_swncq_ncq_stop(ap);
+ ehc->i.action |= ATA_EH_HARDRESET;
}
ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
nv_hardreset, ata_std_postreset);
}
-static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+#ifdef CONFIG_PM
+static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
+{
+ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+ u32 tmp;
+
+ /* clear irq */
+ writel(~0, mmio + NV_INT_STATUS_MCP55);
+
+ /* disable irq */
+ writel(0, mmio + NV_INT_ENABLE_MCP55);
+
+ /* disable swncq */
+ tmp = readl(mmio + NV_CTL_MCP55);
+ tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
+ writel(tmp, mmio + NV_CTL_MCP55);
+
+ return 0;
+}
+
+static int nv_swncq_port_resume(struct ata_port *ap)
+{
+ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+ u32 tmp;
+
+ /* clear irq */
+ writel(~0, mmio + NV_INT_STATUS_MCP55);
+
+ /* enable irq */
+ writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
+
+ /* enable swncq */
+ tmp = readl(mmio + NV_CTL_MCP55);
+ writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
+
+ return 0;
+}
+#endif
+
+static void nv_swncq_host_init(struct ata_host *host)
+{
+ u32 tmp;
+ void __iomem *mmio = host->iomap[NV_MMIO_BAR];
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+ u8 regval;
+
+ /* disable ECO 398 */
+ pci_read_config_byte(pdev, 0x7f, ®val);
+ regval &= ~(1 << 7);
+ pci_write_config_byte(pdev, 0x7f, regval);
+
+ /* enable swncq */
+ tmp = readl(mmio + NV_CTL_MCP55);
+ VPRINTK("HOST_CTL:0x%X\n", tmp);
+ writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
+
+ /* enable irq intr */
+ tmp = readl(mmio + NV_INT_ENABLE_MCP55);
+ VPRINTK("HOST_ENABLE:0x%X\n", tmp);
+ writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
+
+ /* clear port irq */
+ writel(~0x0, mmio + NV_INT_STATUS_MCP55);
+}
+
+static int nv_swncq_slave_config(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct ata_device *dev;
+ int rc;
+ u8 rev;
+ u8 check_maxtor = 0;
+ unsigned char model_num[ATA_ID_PROD_LEN + 1];
+
+ rc = ata_scsi_slave_config(sdev);
+ if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+ /* Not a proper libata device, ignore */
+ return rc;
+
+ dev = &ap->link.device[sdev->id];
+ if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
+ return rc;
+
+ /* if MCP51 and Maxtor, then disable ncq */
+ if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
+ pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
+ check_maxtor = 1;
+
+ /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
+ if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
+ pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
+ pci_read_config_byte(pdev, 0x8, &rev);
+ if (rev <= 0xa2)
+ check_maxtor = 1;
+ }
+
+ if (!check_maxtor)
+ return rc;
+
+ ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
+
+ if (strncmp(model_num, "Maxtor", 6) == 0) {
+ ata_scsi_change_queue_depth(sdev, 1);
+ ata_dev_printk(dev, KERN_NOTICE,
+ "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
+ }
+
+ return rc;
+}
+
+static int nv_swncq_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
+ struct nv_swncq_port_priv *pp;
+ int rc;
+
+ rc = ata_port_start(ap);
+ if (rc)
+ return rc;
+
+ pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
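+ /* one PRD table per NCQ tag, so every queued command gets its
+ * own scatter/gather list */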
+ pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
+ &pp->prd_dma, GFP_KERNEL);
+ if (!pp->prd)
+ return -ENOMEM;
+ memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
+
+ ap->private_data = pp;
+ pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
+ pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
+ pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
+
+ return 0;
+}
+
+static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (qc->tf.protocol != ATA_PROT_NCQ) {
+ ata_qc_prep(qc);
+ return;
+ }
+
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ nv_swncq_fill_sg(qc);
+}
+
+static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct scatterlist *sg;
+ unsigned int idx;
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct ata_prd *prd;
+
+ WARN_ON(qc->__sg == NULL);
+ WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
+
+ prd = pp->prd + ATA_MAX_PRD * qc->tag;
+
+ idx = 0;
+ ata_for_each_sg(sg, qc) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ addr = (u32)sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
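+ /* a single PRD entry must not cross a 64K boundary, so split
+ * segments that would */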
+ while (sg_len) {
+ offset = addr & 0xffff;
+ len = sg_len;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ prd[idx].addr = cpu_to_le32(addr);
+ prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+
+ idx++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ if (idx)
+ prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
+ struct ata_queued_cmd *qc)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ if (qc == NULL)
+ return 0;
+
+ DPRINTK("Enter\n");
+
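+ /* mark the tag active in SActive and in our bookkeeping before
+ * sending the command */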
+ writel((1 << qc->tag), pp->sactive_block);
+ pp->last_issue_tag = qc->tag;
+ pp->dhfis_bits &= ~(1 << qc->tag);
+ pp->dmafis_bits &= ~(1 << qc->tag);
+ pp->qc_active |= (0x1 << qc->tag);
+
+ ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
+ ap->ops->exec_command(ap, &qc->tf);
+
+ DPRINTK("Issued tag %u\n", qc->tag);
+
+ return 0;
+}
+
+static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ if (qc->tf.protocol != ATA_PROT_NCQ)
+ return ata_qc_issue_prot(qc);
+
+ DPRINTK("Enter\n");
+
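+ /* issue immediately only if nothing is in flight; otherwise
+ * defer the command and issue it later from the interrupt path */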
+ if (!pp->qc_active)
+ nv_swncq_issue_atacmd(ap, qc);
+ else
+ nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
+
+ return 0;
+}
+
+static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
+{
+ u32 serror;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+
+ ata_ehi_clear_desc(ehi);
+
+ /* AHCI needs SError cleared; otherwise, it might lock up */
+ sata_scr_read(&ap->link, SCR_ERROR, &serror);
+ sata_scr_write(&ap->link, SCR_ERROR, serror);
+
+ /* analyze @fis */
+ if (fis & NV_SWNCQ_IRQ_ADDED)
+ ata_ehi_push_desc(ehi, "hot plug");
+ else if (fis & NV_SWNCQ_IRQ_REMOVED)
+ ata_ehi_push_desc(ehi, "hot unplug");
+
+ ata_ehi_hotplugged(ehi);
+
+ /* okay, let's hand over to EH */
+ ehi->serror |= serror;
+
+ ata_port_freeze(ap);
+}
+
+static int nv_swncq_sdbfis(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u32 sactive;
+ int nr_done = 0;
+ u32 done_mask;
+ int i;
+ u8 host_stat;
+ u8 lack_dhfis = 0;
+
+ host_stat = ap->ops->bmdma_status(ap);
+ if (unlikely(host_stat & ATA_DMA_ERR)) {
+ /* error when transferring data to/from memory */
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+ ehi->err_mask |= AC_ERR_HOST_BUS;
+ ehi->action |= ATA_EH_SOFTRESET;
+ return -EINVAL;
+ }
+
+ ap->ops->irq_clear(ap);
+ __ata_bmdma_stop(ap);
+
+ sactive = readl(pp->sactive_block);
+ done_mask = pp->qc_active ^ sactive;
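+ /* tags that were active but have dropped out of SActive are done;
+ * a tag set in SActive without being active is a protocol error
+ * and is caught below */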
+
+ if (unlikely(done_mask & sactive)) {
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
+ "(%08x->%08x)", pp->qc_active, sactive);
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_HARDRESET;
+ return -EINVAL;
+ }
+ for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ if (!(done_mask & (1 << i)))
+ continue;
+
+ qc = ata_qc_from_tag(ap, i);
+ if (qc) {
+ ata_qc_complete(qc);
+ pp->qc_active &= ~(1 << i);
+ pp->dhfis_bits &= ~(1 << i);
+ pp->dmafis_bits &= ~(1 << i);
+ pp->sdbfis_bits |= (1 << i);
+ nr_done++;
+ }
+ }
+
+ if (!ap->qc_active) {
+ DPRINTK("over\n");
+ nv_swncq_pp_reinit(ap);
+ return nr_done;
+ }
+
+ if (pp->qc_active & pp->dhfis_bits)
+ return nr_done;
+
+ if ((pp->ncq_flags & ncq_saw_backout) ||
+ (pp->qc_active ^ pp->dhfis_bits))
+ /* if the controller can't get a Device-to-Host Register FIS,
+ * the driver needs to reissue the new command.
+ */
+ lack_dhfis = 1;
+
+ DPRINTK("id 0x%x QC: qc_active 0x%x,"
+ "SWNCQ:qc_active 0x%X defer_bits %X "
+ "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
+ ap->print_id, ap->qc_active, pp->qc_active,
+ pp->defer_queue.defer_bits, pp->dhfis_bits,
+ pp->dmafis_bits, pp->last_issue_tag);
+
+ nv_swncq_fis_reinit(ap);
+
+ if (lack_dhfis) {
+ qc = ata_qc_from_tag(ap, pp->last_issue_tag);
+ nv_swncq_issue_atacmd(ap, qc);
+ return nr_done;
+ }
+
+ if (pp->defer_queue.defer_bits) {
+ /* send deferral queue command */
+ qc = nv_swncq_qc_from_dq(ap);
+ WARN_ON(qc == NULL);
+ nv_swncq_issue_atacmd(ap, qc);
+ }
+
+ return nr_done;
+}
+
+static inline u32 nv_swncq_tag(struct ata_port *ap)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ u32 tag;
+
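+ /* the 5-bit tag sits in bits 6:2 of the NCQ tag register; callers
+ * use it to find which queued command the hardware is acting on */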
+ tag = readb(pp->tag_block) >> 2;
+ return (tag & 0x1f);
+}
+
+static int nv_swncq_dmafis(struct ata_port *ap)
{
- static int printed_version = 0;
+ struct ata_queued_cmd *qc;
+ unsigned int rw;
+ u8 dmactl;
+ u32 tag;
+ struct nv_swncq_port_priv *pp = ap->private_data;
+
+ __ata_bmdma_stop(ap);
+ tag = nv_swncq_tag(ap);
+
+ DPRINTK("dma setup tag 0x%x\n", tag);
+ qc = ata_qc_from_tag(ap, tag);
+
+ if (unlikely(!qc))
+ return 0;
+
+ rw = qc->tf.flags & ATA_TFLAG_WRITE;
+
+ /* load PRD table addr. */
+ iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
+ ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+ /* specify data direction, triple-check start bit is clear */
+ dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ dmactl &= ~ATA_DMA_WR;
+ if (!rw)
+ dmactl |= ATA_DMA_WR;
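+ /* ATA_DMA_WR makes the bus master write to memory, i.e. a
+ * device-to-host (read) transfer, hence the inverted test */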
+
+ iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+ return 1;
+}
+
+static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
+{
+ struct nv_swncq_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u32 serror;
+ u8 ata_stat;
+ int rc = 0;
+
+ ata_stat = ap->ops->check_status(ap);
+ nv_swncq_irq_clear(ap, fis);
+ if (!fis)
+ return;
+
+ if (ap->pflags & ATA_PFLAG_FROZEN)
+ return;
+
+ if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
+ nv_swncq_hotplug(ap, fis);
+ return;
+ }
+
+ if (!pp->qc_active)
+ return;
+
+ if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
+ return;
+ ap->ops->scr_write(ap, SCR_ERROR, serror);
+
+ if (ata_stat & ATA_ERR) {
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
+ ehi->err_mask |= AC_ERR_DEV;
+ ehi->serror |= serror;
+ ehi->action |= ATA_EH_SOFTRESET;
+ ata_port_freeze(ap);
+ return;
+ }
+
+ if (fis & NV_SWNCQ_IRQ_BACKOUT) {
+ /* If a backout interrupt is raised, the driver must issue
+ * the new command again some time later.
+ */
+ pp->ncq_flags |= ncq_saw_backout;
+ }
+
+ if (fis & NV_SWNCQ_IRQ_SDBFIS) {
+ pp->ncq_flags |= ncq_saw_sdb;
+ DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
+ "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
+ ap->print_id, pp->qc_active, pp->dhfis_bits,
+ pp->dmafis_bits, readl(pp->sactive_block));
+ rc = nv_swncq_sdbfis(ap);
+ if (rc < 0)
+ goto irq_error;
+ }
+
+ if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
+ /* The interrupt indicates the new command
+ * was transmitted correctly to the drive.
+ */
+ pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
+ pp->ncq_flags |= ncq_saw_d2h;
+ if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
+ ata_ehi_push_desc(ehi, "illegal fis transaction");
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_HARDRESET;
+ goto irq_error;
+ }
+
+ if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
+ !(pp->ncq_flags & ncq_saw_dmas)) {
+ ata_stat = ap->ops->check_status(ap);
+ if (ata_stat & ATA_BUSY)
+ goto irq_exit;
+
+ if (pp->defer_queue.defer_bits) {
+ DPRINTK("send next command\n");
+ qc = nv_swncq_qc_from_dq(ap);
+ nv_swncq_issue_atacmd(ap, qc);
+ }
+ }
+ }
+
+ if (fis & NV_SWNCQ_IRQ_DMASETUP) {
+ /* program the DMA controller with the appropriate PRD buffers
+ * and start the DMA transfer for the requested command.
+ */
+ pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
+ pp->ncq_flags |= ncq_saw_dmas;
+ rc = nv_swncq_dmafis(ap);
+ }
+
+irq_exit:
+ return;
+irq_error:
+ ata_ehi_push_desc(ehi, "fis:0x%x", fis);
+ ata_port_freeze(ap);
+ return;
+}
+
+static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *host = dev_instance;
+ unsigned int i;
+ unsigned int handled = 0;
+ unsigned long flags;
+ u32 irq_stat;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+ if (ap->link.sactive) {
+ nv_swncq_host_interrupt(ap, (u16)irq_stat);
+ handled = 1;
+ } else {
+ if (irq_stat) /* preserve hotplug bits */
+ nv_swncq_irq_clear(ap, 0xfff0);
+
+ handled += nv_host_intr(ap, (u8)irq_stat);
+ }
+ }
+ irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version;
const struct ata_port_info *ppi[] = { NULL, NULL };
struct ata_host *host;
struct nv_host_priv *hpriv;
// Make sure this is a SATA controller by counting the number of bars
// (NVIDIA SATA controllers will always have six bars). Otherwise,
// it's an IDE controller and we ignore it.
- for (bar=0; bar<6; bar++)
+ for (bar = 0; bar < 6; bar++)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
return rc;
/* determine type and allocate host */
- if (type >= CK804 && adma_enabled) {
+ if (type == CK804 && adma_enabled) {
dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
type = ADMA;
}
+ if (type == SWNCQ) {
+ if (swncq_enabled)
+ dev_printk(KERN_NOTICE, &pdev->dev,
+ "Using SWNCQ mode\n");
+ else
+ type = GENERIC;
+ }
+
ppi[0] = &nv_port_info[type];
rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
if (rc)
rc = nv_adma_host_init(host);
if (rc)
return rc;
- }
+ } else if (type == SWNCQ)
+ nv_swncq_host_init(host);
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
int rc;
rc = ata_pci_device_do_resume(pdev);
- if(rc)
+ if (rc)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- if(hpriv->type >= CK804) {
+ if (hpriv->type >= CK804) {
u8 regval;
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, ®val);
regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
- if(hpriv->type == ADMA) {
+ if (hpriv->type == ADMA) {
u32 tmp32;
struct nv_adma_port_priv *pp;
/* enable/disable ADMA on the ports appropriately */
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
pp = host->ports[0]->private_data;
- if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
- NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
else
tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
- NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
pp = host->ports[1]->private_data;
- if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
- NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
else
tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
- NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
}
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
+module_param_named(swncq, swncq_enabled, bool, 0444);
+MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
+