/*
 * IDE DMA support (including IDE PCI BM-DMA).
 *
 * Copyright (C) 1995-1998	Mark Lord
 * Copyright (C) 1999-2000	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2004, 2007	Bartlomiej Zolnierkiewicz
 *
 * May be copied or modified under the terms of the GNU General Public License
 *
 * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 *
 * Special Thanks to Mark for his six years of work.
 *
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
static const struct drive_list_entry drive_whitelist[] = {
	{ "Micropolis 2112A"	, NULL },
	{ "CONNER CTMA 4000"	, NULL },
	{ "CONNER CTT8000-A"	, NULL },
	{ "ST34342A"		, NULL },
	{ NULL			, NULL }
};
static const struct drive_list_entry drive_blacklist[] = {
	{ "WDC AC11000H"	, NULL },
	{ "WDC AC22100H"	, NULL },
	{ "WDC AC32500H"	, NULL },
	{ "WDC AC33100H"	, NULL },
	{ "WDC AC31600H"	, NULL },
	{ "WDC AC32100H"	, "24.09P07" },
	{ "WDC AC23200L"	, "21.10N21" },
	{ "Compaq CRD-8241B"	, NULL },
	{ "CRD-8400B"		, NULL },
	{ "CRD-8480B"		, NULL },
	{ "CRD-8482B"		, NULL },
	{ "SanDisk SDP3B"	, NULL },
	{ "SanDisk SDP3B-64"	, NULL },
	{ "SANYO CD-ROM CRD"	, NULL },
	{ "HITACHI CDR-8"	, NULL },
	{ "HITACHI CDR-8335"	, NULL },
	{ "HITACHI CDR-8435"	, NULL },
	{ "Toshiba CD-ROM XM-6202B"	, NULL },
	{ "TOSHIBA CD-ROM XM-1702BC"	, NULL },
	{ "CD-532E-A"		, NULL },
	{ "E-IDE CD-ROM CR-840"	, NULL },
	{ "CD-ROM Drive/F5A"	, NULL },
	{ "WPI CDD-820"		, NULL },
	{ "SAMSUNG CD-ROM SC-148C"	, NULL },
	{ "SAMSUNG CD-ROM SC"	, NULL },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM"	, NULL },
	{ "_NEC DV5800A"	, NULL },
	{ "SAMSUNG CD-ROM SN-124"	, "N001" },
	{ "Seagate STT20000A"	, NULL },
	{ "CD-ROM CDR_U200"	, "1.09" },
	{ NULL			, NULL }
};
/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an
 *	IDE device.
 */
ide_startstop_t ide_dma_intr(ide_drive_t *drive)
{
	u8 stat = 0, dma_stat = 0;

	dma_stat = drive->hwif->dma_ops->dma_end(drive);
	stat = ide_read_status(drive);

	if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			task_end_request(drive, rq, stat);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
		       drive->name, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

EXPORT_SYMBOL_GPL(ide_dma_intr);
static int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}
/**
 *	ide_build_sglist	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to build the DMA table for
 *	@rq: the request holding the sg list
 *
 *	Perform the DMA mapping magic necessary to access the source or
 *	target buffers of a request via DMA.  The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */
int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
			  hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_build_sglist);
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	ide_build_dmatable	-	build IDE DMA table
 *
 *	ide_build_dmatable() prepares a dma request. We map the command
 *	to get the pci bus addresses of the buffers and then build up
 *	the PRD table that the IDE layer wants to be fed. The code
 *	knows about the 64K wrap bug in the CS5530.
 *
 *	Returns the number of built PRD entries if all went okay,
 *	returns 0 otherwise.
 *
 *	May also be invoked from trm290.c
 */
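/*
 * For reference (SFF-8038i bus-master IDE): each PRD entry built below is
 * two little-endian 32-bit words -- the physical base address of a memory
 * region, then a byte count in the low 16 bits (0x0000 meaning 64KB), with
 * bit 31 of the second word marking the final entry of the table.
 */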
int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int *table = hwif->dmatable_cpu;
	unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);
	if (!i)
		return 0;

	sg = hwif->sg_table;
	while (i) {
		u32 cur_addr = sg_dma_address(sg);
		u32 cur_len = sg_dma_len(sg);

		/*
		 * Fill in the dma table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */
		while (cur_len) {
			u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);

			if (count++ >= PRD_ENTRIES) {
				printk(KERN_ERR "%s: DMA table too small\n",
				       drive->name);
				goto use_pio_instead;
			}

			if (bcount > cur_len)
				bcount = cur_len;
			*table++ = cpu_to_le32(cur_addr);
			xcount = bcount & 0xffff;
			if (is_trm290)
				xcount = ((xcount >> 2) - 1) << 16;
			if (xcount == 0x0000) {
				/*
				 * Most chipsets correctly interpret a length of
				 * 0x0000 as 64KB, but at least one (e.g. CS5530)
				 * misinterprets it as zero (!).  So here we break
				 * the 64KB entry into two 32KB entries instead.
				 */
				if (count++ >= PRD_ENTRIES) {
					printk(KERN_ERR "%s: DMA table too small\n",
					       drive->name);
					goto use_pio_instead;
				}
				*table++ = cpu_to_le32(0x8000);
				*table++ = cpu_to_le32(cur_addr + 0x8000);
				xcount = 0x8000;
			}
			*table++ = cpu_to_le32(xcount);
			cur_addr += bcount;
			cur_len -= bcount;
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}

	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);

use_pio_instead:
	ide_destroy_dmatable(drive);

	return 0; /* revert to PIO for this request */
}

EXPORT_SYMBOL_GPL(ide_build_dmatable);

#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
/**
 *	ide_destroy_dmatable	-	clean up DMA mapping
 *	@drive: The drive to unmap
 *
 *	Teardown mappings after DMA has completed. This must be called
 *	after the completion of each use of ide_build_dmatable and before
 *	the next use of ide_build_dmatable. Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */

void ide_destroy_dmatable(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	config_drive_for_dma	-	attempt to activate IDE DMA
 *	@drive: the drive to place in DMA mode
 *
 *	If the drive supports at least mode 2 DMA or UDMA of any kind
 *	then attempt to place it into DMA mode. Drives that are known to
 *	support DMA but predate the DMA properties or that are known
 *	to have DMA handling bugs are also set up appropriately based
 *	on the good/bad drive lists.
 */
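/*
 * A reminder on the identify-data bits tested below: id->field_valid bit 1
 * (0x2) indicates that the SW/MW DMA fields are valid, and bit 2 (0x4) that
 * the UltraDMA field (identify word 88) is valid.  In the DMA words the low
 * byte lists supported modes and the high byte the currently selected one,
 * so 0x404 means "mode 2 supported and selected".
 */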
static int config_drive_for_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct hd_driveid *id = drive->id;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/*
	 * Enable DMA on any drive that has
	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
	 */
	if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
		return 1;

	/*
	 * Enable DMA on any drive that has mode2 DMA
	 * (multi or single) enabled
	 */
	if (id->field_valid & 2)	/* regular DMA */
		if ((id->dma_mword & 0x404) == 0x404 ||
		    (id->dma_1word & 0x404) == 0x404)
			return 1;

	/* Consult the list of known "good" drives */
	if (ide_dma_good_drive(drive))
		return 1;

	return 0;
}
/**
 *	dma_timer_expiry	-	handle a DMA timeout
 *	@drive: Drive that timed out
 *
 *	An IDE DMA transfer timed out. In the event of an error we ask
 *	the driver to resolve the problem; if a DMA transfer is still
 *	in progress we continue to wait (arguably we need to add a
 *	secondary 'I don't care what the drive thinks' timeout here).
 *	Finally if we have an interrupt we let it complete the I/O.
 *	But only one time - we clear expiry and if it's still not
 *	completed after WAIT_CMD, we error and retry in PIO.
 *	This can occur if an interrupt is lost or due to hang or bugs.
 */
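/*
 * Expiry handlers such as this one are invoked from the core timeout code:
 * a positive return value re-arms the timer for that many jiffies, while a
 * return of zero or less lets the normal timeout/error handling take over.
 */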
static int dma_timer_expiry(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = hwif->read_sff_dma_status(hwif);

	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
	       drive->name, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	HWGROUP(drive)->expiry = NULL;	/* one free ride for now */

	/* 1 dmaing, 2 error, 4 intr */
	if (dma_stat & 2)	/* ERROR */
		return -1;
	if (dma_stat & 1)	/* DMAing */
		return WAIT_CMD;
	if (dma_stat & 4)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}
/**
 *	ide_dma_host_set	-	Enable/disable DMA on a host
 *	@drive: drive to control
 *
 *	Enable/disable DMA on an IDE controller following generic
 *	bus-mastering IDE controller behaviour.
 */
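/*
 * Bit layout of the BM-DMA status register used below (per SFF-8038i):
 * bit 0 - bus master active, bit 1 - DMA error, bit 2 - interrupt,
 * bits 5 and 6 - drive 0/drive 1 DMA capable, bit 7 - simplex only.
 */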
void ide_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 unit = (drive->select.b.unit & 0x01);
	u8 dma_stat = hwif->read_sff_dma_status(hwif);

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(dma_stat,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
}

EXPORT_SYMBOL_GPL(ide_dma_host_set);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
/**
 *	ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off the current DMA on this IDE controller.
 */
void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->using_dma = 0;
	ide_toggle_bounce(drive, 0);

	drive->hwif->dma_ops->dma_host_set(drive, 0);
}

EXPORT_SYMBOL(ide_dma_off_quietly);
/**
 *	ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */
void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	ide_dma_off_quietly(drive);
}

EXPORT_SYMBOL(ide_dma_off);
/**
 *	ide_dma_on	-	Enable DMA on a device
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */
void ide_dma_on(ide_drive_t *drive)
{
	drive->using_dma = 1;
	ide_toggle_bounce(drive, 1);

	drive->hwif->dma_ops->dma_host_set(drive, 1);
}
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	ide_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers for a device
 *	that follows generic IDE PCI DMA behaviour. Controllers can
 *	override this function if they need to.
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned.
 */
int ide_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 dma_stat;

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writel(hwif->dmatable_dma,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
	else
		outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);

	/* specify r/w */
	if (mmio)
		writeb(reading, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	else
		outb(reading, hwif->dma_base + ATA_DMA_CMD);

	/* read DMA status for INTR & ERROR flags */
	dma_stat = hwif->read_sff_dma_status(hwif);

	/* clear INTR & ERROR flags */
	if (mmio)
		writeb(dma_stat | 6,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);

	drive->waiting_for_dma = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(ide_dma_setup);
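/*
 * The usual BM-DMA request sequence is: ide_dma_setup() to program the PRD
 * pointer and direction, ide_dma_exec_cmd() to issue the taskfile command,
 * then ide_dma_start() to set the start/stop bit -- in that order, for the
 * reason noted in ide_dma_start() below.
 */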
void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2 * WAIT_CMD,
			    dma_timer_expiry);
}

EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 dma_cmd;

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	if (hwif->host_flags & IDE_HFLAG_MMIO) {
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		/* start DMA */
		writeb(dma_cmd | 1,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
	}

	wmb();
}

EXPORT_SYMBOL_GPL(ide_dma_start);
/* returns non-zero on error, 0 otherwise */
int __ide_dma_end(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 dma_stat = 0, dma_cmd = 0;

	drive->waiting_for_dma = 0;

	if (mmio) {
		/* get DMA command mode */
		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
		/* stop DMA */
		writeb(dma_cmd & ~1,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
	} else {
		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
		outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
	}

	/* get DMA status */
	dma_stat = hwif->read_sff_dma_status(hwif);

	if (mmio)
		/* clear the INTR & ERROR bits */
		writeb(dma_stat | 6,
		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
	else
		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);

	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status: intr set, active and error clear */
	wmb();
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

EXPORT_SYMBOL(__ide_dma_end);
/* returns 1 if dma irq issued, 0 otherwise */
int ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 dma_stat = hwif->read_sff_dma_status(hwif);

	/* return 1 if INTR asserted */
	if ((dma_stat & 4) == 4)
		return 1;

	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
		       drive->name, __func__);
	return 0;
}

EXPORT_SYMBOL_GPL(ide_dma_test_irq);
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
int __ide_dma_bad_drive(ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

	int blacklist = ide_in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
		       drive->name, id->model);
		return blacklist;
	}
	return 0;
}

EXPORT_SYMBOL(__ide_dma_bad_drive);
static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};
static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch (base) {
	case XFER_UDMA_0:
		if ((id->field_valid & 4) == 0)
			break;

		if (port_ops && port_ops->udma_filter)
			mask = port_ops->udma_filter(drive);
		else
			mask = hwif->ultra_mask;
		mask &= id->dma_ultra;

		/*
		 * avoid false cable warning from eighty_ninty_three()
		 */
		if (req_mode > XFER_UDMA_2) {
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		if ((id->field_valid & 2) == 0)
			break;
		if (port_ops && port_ops->mdma_filter)
			mask = port_ops->mdma_filter(drive);
		else
			mask = hwif->mwdma_mask;
		mask &= id->dma_mword;
		break;
	case XFER_SW_DMA_0:
		if (id->field_valid & 2) {
			mask = id->dma_1word & hwif->swdma_mask;
		} else if (id->tDMA) {
			/*
			 * ide_fix_driveid() doesn't convert ->tDMA to the
			 * CPU endianness so we need to do it here
			 */
			u8 mode = le16_to_cpu(id->tDMA);

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = ((2 << mode) - 1) & hwif->swdma_mask;
		}
		break;
	default:
		BUG();
		break;
	}

	return mask;
}
/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */
u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		if (ide_dma_good_drive(drive) &&
		    drive->id->eide_dma_time < 150)
			mode = XFER_MW_DMA_1;
	}

	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
	       mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}
EXPORT_SYMBOL_GPL(ide_find_dma_mode);
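/*
 * Callers that just want the best available mode use ide_max_dma_mode(),
 * which (in <linux/ide.h>) is simply ide_find_dma_mode(drive, XFER_UDMA_6).
 */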
static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	if (drive->nodma || (drive->id->capability & 1) == 0)
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	if (ide_id_dma_bug(drive))
		return 0;

	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);
	if (!speed)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}
static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int vdma = (hwif->host_flags & IDE_HFLAG_VDMA) ? 1 : 0;

	if (!vdma && ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	ide_set_max_pio(drive);

	return vdma ? 0 : -1;
}
int ide_id_dma_bug(ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

	if (id->field_valid & 4) {
		if ((id->dma_ultra >> 8) && (id->dma_mword >> 8))
			goto err_out;
	} else if (id->field_valid & 2) {
		if ((id->dma_mword >> 8) && (id->dma_1word >> 8))
			goto err_out;
	}

	return 0;
err_out:
	printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
	return 1;
}
int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMAing for the beginning of the check.
	 * Some chipsets appear to do interesting
	 * things, if not checked and cleared.
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc)
		return rc;

	ide_dma_on(drive);

	return 0;
}
void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non Ultra-DMA modes without iCRC's.  Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
void ide_dma_lost_irq(ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
}

EXPORT_SYMBOL(ide_dma_lost_irq);
void ide_dma_timeout(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

	if (hwif->dma_ops->dma_test_irq(drive))
		return;

	hwif->dma_ops->dma_end(drive);
}

EXPORT_SYMBOL(ide_dma_timeout);
void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		struct pci_dev *pdev = to_pci_dev(hwif->dev);

		pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
}
int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);

	hwif->dmatable_cpu = pci_alloc_consistent(pdev,
						  PRD_ENTRIES * PRD_BYTES,
						  &hwif->dmatable_dma);

	if (hwif->dmatable_cpu)
		return 0;

	printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
	       hwif->name);

	return 1;
}

EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
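/*
 * The table allocated above is PRD_ENTRIES * PRD_BYTES bytes of
 * DMA-coherent memory.  It is allocated once per interface at setup time
 * and then rewritten by ide_build_dmatable() for every request, rather
 * than being allocated per request.
 */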
static const struct ide_dma_ops sff_dma_ops = {
	.dma_host_set		= ide_dma_host_set,
	.dma_setup		= ide_dma_setup,
	.dma_exec_cmd		= ide_dma_exec_cmd,
	.dma_start		= ide_dma_start,
	.dma_end		= __ide_dma_end,
	.dma_test_irq		= ide_dma_test_irq,
	.dma_timeout		= ide_dma_timeout,
	.dma_lost_irq		= ide_dma_lost_irq,
};
void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
{
	hwif->dma_base = base;

	hwif->dma_ops = &sff_dma_ops;
}

EXPORT_SYMBOL_GPL(ide_setup_dma);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */