#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
/**
 * config_drive_for_dma - attempt to activate IDE DMA
 * @drive: the drive to place in DMA mode
 *
 * If the drive supports at least mode 2 DMA or UDMA of any kind
 * then attempt to place it into DMA mode.  Drives that are known to
 * support DMA but predate the DMA properties or that are known
 * to have DMA handling bugs are also set up appropriately based
 * on the good/bad drive lists.
 */
int config_drive_for_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u16 *id = drive->id;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}
	/*
	 * Enable DMA on any drive that has
	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
	 */
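	/*
	 * For reference: word 88 of the IDENTIFY data (ATA_ID_UDMA_MODES)
	 * reports the supported UDMA modes in bits 0-6 and the currently
	 * selected mode in bits 8-14, hence the ">> 8" below; the
	 * ATA_ID_FIELD_VALID check (bit 2) confirms that word 88 holds
	 * valid data in the first place.
	 */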
	if ((id[ATA_ID_FIELD_VALID] & 4) &&
	    ((id[ATA_ID_UDMA_MODES] >> 8) & 0x7f))
		return 1;

	/*
	 * Enable DMA on any drive that has mode2 DMA
	 * (multi or single) enabled
	 */
	if (id[ATA_ID_FIELD_VALID] & 2)	/* regular DMA */
		if ((id[ATA_ID_MWDMA_MODES] & 0x404) == 0x404 ||
		    (id[ATA_ID_SWDMA_MODES] & 0x404) == 0x404)
			return 1;

	/* Consult the list of known "good" drives */
	if (ide_dma_good_drive(drive))
		return 1;

	return 0;
}

u8 ide_dma_sff_read_status(ide_hwif_t *hwif)
{
	unsigned long addr = hwif->dma_base + ATA_DMA_STATUS;

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		return readb((void __iomem *)addr);
	else
		return inb(addr);
}
EXPORT_SYMBOL_GPL(ide_dma_sff_read_status);

/**
 * ide_dma_host_set - Enable/disable DMA on a host
 * @drive: drive to control
 * @on: 1 to enable DMA for the drive, 0 to disable it
 *
 * Enable/disable DMA on an IDE controller following generic
 * bus-mastering IDE controller behaviour.
 */
void ide_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 unit = drive->dn & 1;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

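	/*
	 * Bits 5 and 6 of the SFF-8038i bus-master status register are the
	 * software-maintained "drive 0/1 DMA capable" flags; the write-back
	 * below only records the new setting, it does not itself start or
	 * stop the DMA engine.
	 */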
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(dma_stat,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}
EXPORT_SYMBOL_GPL(ide_dma_host_set);

/**
 * ide_build_dmatable - build IDE DMA table
 * @drive: the drive to build the DMA table for
 * @rq: the request holding the scatter/gather list
 *
 * ide_build_dmatable() prepares a dma request.  We map the command
 * to get the pci bus addresses of the buffers and then build up
 * the PRD table that the IDE layer wants to be fed.
 *
 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
 * but at least one (e.g. CS5530) misinterprets it as zero (!).
 * So we break the 64KB entry into two 32KB entries instead.
 *
 * Returns the number of built PRD entries if all went okay,
 * returns 0 otherwise.
 *
 * May also be invoked from trm290.c
 */
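/*
 * For reference: each PRD entry is two little-endian 32-bit words -- the
 * physical base address of a memory region, then the byte count in the low
 * 16 bits (0x0000 meaning 64KB), with bit 31 of the second word flagging
 * the last entry in the table.  PRD_ENTRIES bounds how many such entries
 * the interface's table can hold, which is why the loop below bails out to
 * PIO once the count is exceeded.
 */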
int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	__le32 *table = (__le32 *)hwif->dmatable_cpu;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;
	u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);

	hwif->sg_nents = ide_build_sglist(drive, rq);
	if (hwif->sg_nents == 0)
		return 0;

	for_each_sg(hwif->sg_table, sg, hwif->sg_nents, i) {
		u32 cur_addr, cur_len, xcount, bcount;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the dma table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */
		while (cur_len) {
			if (count++ >= PRD_ENTRIES)
				goto use_pio_instead;

			bcount = 0x10000 - (cur_addr & 0xffff);
			if (bcount > cur_len)
				bcount = cur_len;
			*table++ = cpu_to_le32(cur_addr);
			xcount = bcount & 0xffff;
			if (is_trm290)
				xcount = ((xcount >> 2) - 1) << 16;
			else if (xcount == 0x0000) {
				if (count++ >= PRD_ENTRIES)
					goto use_pio_instead;
				*table++ = cpu_to_le32(0x8000);
				*table++ = cpu_to_le32(cur_addr + 0x8000);
				xcount = 0x8000;
			}
			*table++ = cpu_to_le32(xcount);
			cur_addr += bcount;
			cur_len -= bcount;
		}
	}

	if (count) {
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}

use_pio_instead:
	printk(KERN_ERR "%s: %s\n", drive->name,
		count ? "DMA table too small" : "empty DMA table?");

	ide_destroy_dmatable(drive);

	return 0; /* revert to PIO for this request */
}
EXPORT_SYMBOL_GPL(ide_build_dmatable);

/**
 * ide_dma_setup - begin a DMA phase
 * @drive: target device
 *
 * Build an IDE DMA PRD (IDE speak for scatter gather table)
 * and then set up the DMA transfer registers for a device
 * that follows generic IDE PCI DMA behaviour.  Controllers can
 * override this function if they need to.
 *
 * Returns 0 on success.  If a PIO fallback is required then 1
 * is returned.
 */
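/*
 * The register sequence below follows the generic SFF-8038i bus-master
 * model: point the controller at the PRD table, program the transfer
 * direction in the command register, clear any stale interrupt/error
 * status, and leave the engine to be started by the dma_start method once
 * the taskfile command has actually been issued to the drive.
 */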
int ide_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	unsigned int reading = rq_data_dir(rq) ? 0 : ATA_DMA_WR;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 dma_stat;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writel(hwif->dmatable_dma,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
	else
		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);

	/* specify r/w */
	if (mmio)
		writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	else
		outb(reading, hwif->dma_base + ATA_DMA_CMD);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	/* clear INTR & ERROR flags */
	if (mmio)
		writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
		     hwif->dma_base + ATA_DMA_STATUS);

	drive->waiting_for_dma = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_setup);

/**
 * dma_timer_expiry - handle a DMA timeout
 * @drive: Drive that timed out
 *
 * An IDE DMA transfer timed out.  In the event of an error we ask
 * the driver to resolve the problem, if a DMA transfer is still
 * in progress we continue to wait (arguably we need to add a
 * secondary 'I don't care what the drive thinks' timeout here).
 * Finally if we have an interrupt we let it complete the I/O.
 * But only one time - we clear expiry and if it's still not
 * completed after WAIT_CMD, we error and retry in PIO.
 * This can occur if an interrupt is lost or due to hang or bugs.
 */
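/*
 * Return value convention, as the IDE core's timer handler interprets it:
 * a positive value is the number of jiffies to keep waiting; zero or a
 * negative value lets the normal timeout/error handling take over.
 */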
static int dma_timer_expiry(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	printk(KERN_WARNING "%s: %s: DMA status (0x%02x)\n",
		drive->name, __func__, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	hwif->expiry = NULL;	/* one free ride for now */

	if (dma_stat & ATA_DMA_ERR)	/* ERROR */
		return -1;

	if (dma_stat & ATA_DMA_ACTIVE)	/* DMAing */
		return WAIT_CMD;

	if (dma_stat & ATA_DMA_INTR)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}

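/*
 * ide_dma_exec_cmd() issues the taskfile command with ide_dma_intr() as the
 * completion handler and a generous 2 * WAIT_CMD timeout, using
 * dma_timer_expiry() above to decide whether an expired timer is genuine.
 */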
void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD,
			    dma_timer_expiry);
}
EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);

void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd;

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	if (hwif->host_flags & IDE_HFLAG_MMIO) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		writeb(dma_cmd | ATA_DMA_START,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	}

	wmb();
}
EXPORT_SYMBOL_GPL(ide_dma_start);

/* returns non-zero on error, 0 otherwise */
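/*
 * On failure the raw BMIDE status is returned in the low byte with 0x10
 * OR'ed in, so the result is guaranteed to be non-zero even when the
 * status register happens to read back as zero.
 */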
int ide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 dma_stat = 0, dma_cmd = 0, mask;

	drive->waiting_for_dma = 0;

	/* stop DMA */
	if (mmio) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		writeb(dma_cmd & ~ATA_DMA_START,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
	}

	/* get DMA status */
	dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	if (mmio)
		/* clear the INTR & ERROR bits */
		writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
		     hwif->dma_base + ATA_DMA_STATUS);

	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	wmb();

	/* verify good DMA status */
	mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR;
	if ((dma_stat & mask) != ATA_DMA_INTR)
		return 0x10 | dma_stat;
	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_end);

/* returns 1 if dma irq issued, 0 otherwise */
int ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);

	return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(ide_dma_test_irq);

const struct ide_dma_ops sff_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= ide_dma_start,
	.dma_end		= ide_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_timeout		= ide_dma_timeout,
	.dma_lost_irq		= ide_dma_lost_irq,
	.dma_sff_read_status	= ide_dma_sff_read_status,
};
EXPORT_SYMBOL_GPL(sff_dma_ops);
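/*
 * Usage sketch (illustrative only -- the "foo" names are hypothetical): a
 * host driver whose controller follows the generic SFF-8038i bus-master
 * behaviour can simply point its port info at this ops table, e.g.:
 *
 *	static const struct ide_port_info foo_port_info = {
 *		.name		= "foo",
 *		.dma_ops	= &sff_dma_ops,
 *		.host_flags	= IDE_HFLAG_MMIO,
 *		.udma_mask	= ATA_UDMA5,
 *	};
 *
 * Controllers with quirks typically copy this structure and override only
 * the individual methods they need to change.
 */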