2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
63 /* debounce timing parameters in msecs { interval, duration, timeout } */
64 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
68 static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
71 static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
72 static void ata_dev_xfermask(struct ata_device *dev);
73 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
75 unsigned int ata_print_id = 1;
76 static struct workqueue_struct *ata_wq;
78 struct workqueue_struct *ata_aux_wq;
80 int atapi_enabled = 1;
81 module_param(atapi_enabled, int, 0444);
82 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
85 module_param(atapi_dmadir, int, 0444);
86 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88 int atapi_passthru16 = 1;
89 module_param(atapi_passthru16, int, 0444);
90 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
93 module_param_named(fua, libata_fua, int, 0444);
94 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96 static int ata_ignore_hpa = 0;
97 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
100 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
101 module_param(ata_probe_timeout, int, 0444);
102 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
104 int libata_noacpi = 1;
105 module_param_named(noacpi, libata_noacpi, int, 0444);
106 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
108 MODULE_AUTHOR("Jeff Garzik");
109 MODULE_DESCRIPTION("Library module for ATA devices");
110 MODULE_LICENSE("GPL");
111 MODULE_VERSION(DRV_VERSION);
115 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
116 * @tf: Taskfile to convert
117 * @pmp: Port multiplier port
118 * @is_cmd: This FIS is for command
119 * @fis: Buffer into which data will be output
121 * Converts a standard ATA taskfile to a Serial ATA
122 * FIS structure (Register - Host to Device).
125 * Inherited from caller.
127 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
129 fis[0] = 0x27; /* Register - Host to Device FIS */
130 fis[1] = pmp & 0xf; /* Port multiplier number */
132 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
134 fis[2] = tf->command;
135 fis[3] = tf->feature;
142 fis[8] = tf->hob_lbal;
143 fis[9] = tf->hob_lbam;
144 fis[10] = tf->hob_lbah;
145 fis[11] = tf->hob_feature;
148 fis[13] = tf->hob_nsect;
159 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
160 * @fis: Buffer from which data will be input
161 * @tf: Taskfile to output
163 * Converts a serial ATA FIS structure to a standard ATA taskfile.
166 * Inherited from caller.
169 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
171 tf->command = fis[2]; /* status */
172 tf->feature = fis[3]; /* error */
179 tf->hob_lbal = fis[8];
180 tf->hob_lbam = fis[9];
181 tf->hob_lbah = fis[10];
184 tf->hob_nsect = fis[13];
187 static const u8 ata_rw_cmds[] = {
191 ATA_CMD_READ_MULTI_EXT,
192 ATA_CMD_WRITE_MULTI_EXT,
196 ATA_CMD_WRITE_MULTI_FUA_EXT,
200 ATA_CMD_PIO_READ_EXT,
201 ATA_CMD_PIO_WRITE_EXT,
214 ATA_CMD_WRITE_FUA_EXT
218 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
219 * @tf: command to examine and configure
220 * @dev: device tf belongs to
222 * Examine the device configuration and tf->flags to calculate
223 * the proper read/write commands and protocol to use.
228 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
232 int index, fua, lba48, write;
234 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
235 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
236 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
238 if (dev->flags & ATA_DFLAG_PIO) {
239 tf->protocol = ATA_PROT_PIO;
240 index = dev->multi_count ? 0 : 8;
241 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
242 /* Unable to use DMA due to host limitation */
243 tf->protocol = ATA_PROT_PIO;
244 index = dev->multi_count ? 0 : 8;
246 tf->protocol = ATA_PROT_DMA;
250 cmd = ata_rw_cmds[index + fua + lba48 + write];
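/* Illustrative note, not part of the original source: ata_rw_cmds[] above
 * is laid out in eight-entry groups (multi-sector PIO, plain PIO, DMA) and
 * the opcode is selected as index + fua + lba48 + write.  A plain PIO
 * LBA48 write, for instance, picks entry 8 + 0 + 2 + 1 = 11, i.e.
 * ATA_CMD_PIO_WRITE_EXT.
 */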
259 * ata_tf_read_block - Read block address from ATA taskfile
260 * @tf: ATA taskfile of interest
261 * @dev: ATA device @tf belongs to
266 * Read block address from @tf. This function can handle all
267 * three address formats - LBA, LBA48 and CHS. tf->protocol and
268 * flags select the address format to use.
271 * Block address read from @tf.
273 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
277 if (tf->flags & ATA_TFLAG_LBA) {
278 if (tf->flags & ATA_TFLAG_LBA48) {
279 block |= (u64)tf->hob_lbah << 40;
280 block |= (u64)tf->hob_lbam << 32;
281 block |= tf->hob_lbal << 24;
283 block |= (tf->device & 0xf) << 24;
285 block |= tf->lbah << 16;
286 block |= tf->lbam << 8;
291 cyl = tf->lbam | (tf->lbah << 8);
292 head = tf->device & 0xf;
295 block = (cyl * dev->heads + head) * dev->sectors + sect;
302 * ata_build_rw_tf - Build ATA taskfile for given read/write request
303 * @tf: Target ATA taskfile
304 * @dev: ATA device @tf belongs to
305 * @block: Block address
306 * @n_block: Number of blocks
307 * @tf_flags: RW/FUA etc...
313 * Build ATA taskfile @tf for read/write request described by
314 * @block, @n_block, @tf_flags and @tag on @dev.
318 * 0 on success, -ERANGE if the request is too large for @dev,
319 * -EINVAL if the request is invalid.
321 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
322 u64 block, u32 n_block, unsigned int tf_flags,
325 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
326 tf->flags |= tf_flags;
328 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
330 if (!lba_48_ok(block, n_block))
333 tf->protocol = ATA_PROT_NCQ;
334 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
336 if (tf->flags & ATA_TFLAG_WRITE)
337 tf->command = ATA_CMD_FPDMA_WRITE;
339 tf->command = ATA_CMD_FPDMA_READ;
341 tf->nsect = tag << 3;
342 tf->hob_feature = (n_block >> 8) & 0xff;
343 tf->feature = n_block & 0xff;
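/* Illustrative note, not in the original source: for FPDMA (NCQ) commands
 * the queue tag occupies bits 7:3 of the sector count register (hence the
 * "tag << 3" above) and the transfer length is carried in the FEATURES /
 * FEATURES_EXP fields just set, rather than in the sector count.
 */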
345 tf->hob_lbah = (block >> 40) & 0xff;
346 tf->hob_lbam = (block >> 32) & 0xff;
347 tf->hob_lbal = (block >> 24) & 0xff;
348 tf->lbah = (block >> 16) & 0xff;
349 tf->lbam = (block >> 8) & 0xff;
350 tf->lbal = block & 0xff;
353 if (tf->flags & ATA_TFLAG_FUA)
354 tf->device |= 1 << 7;
355 } else if (dev->flags & ATA_DFLAG_LBA) {
356 tf->flags |= ATA_TFLAG_LBA;
358 if (lba_28_ok(block, n_block)) {
360 tf->device |= (block >> 24) & 0xf;
361 } else if (lba_48_ok(block, n_block)) {
362 if (!(dev->flags & ATA_DFLAG_LBA48))
366 tf->flags |= ATA_TFLAG_LBA48;
368 tf->hob_nsect = (n_block >> 8) & 0xff;
370 tf->hob_lbah = (block >> 40) & 0xff;
371 tf->hob_lbam = (block >> 32) & 0xff;
372 tf->hob_lbal = (block >> 24) & 0xff;
374 /* request too large even for LBA48 */
377 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
380 tf->nsect = n_block & 0xff;
382 tf->lbah = (block >> 16) & 0xff;
383 tf->lbam = (block >> 8) & 0xff;
384 tf->lbal = block & 0xff;
386 tf->device |= ATA_LBA;
389 u32 sect, head, cyl, track;
391 /* The request -may- be too large for CHS addressing. */
392 if (!lba_28_ok(block, n_block))
395 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
398 /* Convert LBA to CHS */
399 track = (u32)block / dev->sectors;
400 cyl = track / dev->heads;
401 head = track % dev->heads;
402 sect = (u32)block % dev->sectors + 1;
404 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
405 (u32)block, track, cyl, head, sect);
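/* Worked example (illustrative, not from the original source): with
 * dev->heads == 16 and dev->sectors == 63, block 1000 converts to
 * track = 1000 / 63 = 15, cyl = 15 / 16 = 0, head = 15 % 16 = 15,
 * sect = 1000 % 63 + 1 = 56.
 */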
407 /* Check whether the converted CHS can fit.
411 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
414 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
425 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
426 * @pio_mask: pio_mask
427 * @mwdma_mask: mwdma_mask
428 * @udma_mask: udma_mask
430 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
431 * unsigned int xfer_mask.
439 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
440 unsigned int mwdma_mask,
441 unsigned int udma_mask)
443 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
444 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
445 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
449 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
450 * @xfer_mask: xfer_mask to unpack
451 * @pio_mask: resulting pio_mask
452 * @mwdma_mask: resulting mwdma_mask
453 * @udma_mask: resulting udma_mask
455 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
456 * Any NULL destination masks will be ignored.
458 static void ata_unpack_xfermask(unsigned int xfer_mask,
459 unsigned int *pio_mask,
460 unsigned int *mwdma_mask,
461 unsigned int *udma_mask)
464 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
466 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
468 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
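/* Illustrative note, not part of the original source: each per-type mask is
 * a bitmap with bit n meaning "mode n supported", so a pio_mask of 0x1f
 * stands for PIO0-4 and a udma_mask of 0x3f for UDMA0-5.  Packing merely
 * shifts the three bitmaps into non-overlapping fields of one unsigned int;
 * unpacking reverses that.
 */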
471 static const struct ata_xfer_ent {
475 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
476 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
477 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
482 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
483 * @xfer_mask: xfer_mask of interest
485 * Return matching XFER_* value for @xfer_mask. Only the highest
486 * bit of @xfer_mask is considered.
492 * Matching XFER_* value, 0 if no match found.
494 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
496 int highbit = fls(xfer_mask) - 1;
497 const struct ata_xfer_ent *ent;
499 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
500 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
501 return ent->base + highbit - ent->shift;
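/* Example (illustrative, not in the original source): if the highest set
 * bit of @xfer_mask falls in the UDMA range, say the UDMA5 bit, the loop
 * above returns XFER_UDMA_0 + 5, i.e. XFER_UDMA_5.
 */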
506 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
507 * @xfer_mode: XFER_* of interest
509 * Return matching xfer_mask for @xfer_mode.
515 * Matching xfer_mask, 0 if no match found.
517 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
519 const struct ata_xfer_ent *ent;
521 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
522 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
523 return 1 << (ent->shift + xfer_mode - ent->base);
528 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
529 * @xfer_mode: XFER_* of interest
531 * Return matching xfer_shift for @xfer_mode.
537 * Matching xfer_shift, -1 if no match found.
539 static int ata_xfer_mode2shift(unsigned int xfer_mode)
541 const struct ata_xfer_ent *ent;
543 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
544 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
550 * ata_mode_string - convert xfer_mask to string
551 * @xfer_mask: mask of bits supported; only highest bit counts.
553 * Determine string which represents the highest speed
554 * (highest bit in @xfer_mask).
560 * Constant C string representing highest speed listed in
561 * @xfer_mask, or the constant C string "<n/a>".
563 static const char *ata_mode_string(unsigned int xfer_mask)
565 static const char * const xfer_mode_str[] = {
589 highbit = fls(xfer_mask) - 1;
590 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
591 return xfer_mode_str[highbit];
595 static const char *sata_spd_string(unsigned int spd)
597 static const char * const spd_str[] = {
602 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
604 return spd_str[spd - 1];
607 void ata_dev_disable(struct ata_device *dev)
609 if (ata_dev_enabled(dev)) {
610 if (ata_msg_drv(dev->link->ap))
611 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
612 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
619 * ata_devchk - PATA device presence detection
620 * @ap: ATA channel to examine
621 * @device: Device to examine (starting at zero)
623 * This technique was originally described in
624 * Hale Landis's ATADRVR (www.ata-atapi.com), and
625 * later found its way into the ATA/ATAPI spec.
627 * Write a pattern to the ATA shadow registers,
628 * and if a device is present, it will respond by
629 * correctly storing and echoing back the
630 * ATA shadow register contents.
636 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
638 struct ata_ioports *ioaddr = &ap->ioaddr;
641 ap->ops->dev_select(ap, device);
643 iowrite8(0x55, ioaddr->nsect_addr);
644 iowrite8(0xaa, ioaddr->lbal_addr);
646 iowrite8(0xaa, ioaddr->nsect_addr);
647 iowrite8(0x55, ioaddr->lbal_addr);
649 iowrite8(0x55, ioaddr->nsect_addr);
650 iowrite8(0xaa, ioaddr->lbal_addr);
652 nsect = ioread8(ioaddr->nsect_addr);
653 lbal = ioread8(ioaddr->lbal_addr);
655 if ((nsect == 0x55) && (lbal == 0xaa))
656 return 1; /* we found a device */
658 return 0; /* nothing found */
662 * ata_dev_classify - determine device type based on ATA-spec signature
663 * @tf: ATA taskfile register set for device to be identified
665 * Determine from taskfile register contents whether a device is
666 * ATA or ATAPI, as per "Signature and persistence" section
667 * of ATA/PI spec (volume 1, sect 5.14).
673 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
674 * %ATA_DEV_UNKNOWN in the event of failure.
676 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
678 /* Apple's open source Darwin code hints that some devices only
679 * put a proper signature into the LBA mid/high registers,
680 * so we check only those. It's sufficient for uniqueness.
682 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
683 * signatures for ATA and ATAPI devices attached on SerialATA,
684 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
685 * spec has never mentioned using different signatures
686 * for ATA/ATAPI devices. Then, the Serial ATA II: Port
687 * Multiplier specification began to use 0x69/0x96 to identify
688 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
689 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
690 * 0x69/0x96 and described them as reserved for
693 * We follow the current spec and consider that 0x69/0x96
694 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
696 if ((tf->lbam == 0) && (tf->lbah == 0)) {
697 DPRINTK("found ATA device by sig\n");
701 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
702 DPRINTK("found ATAPI device by sig\n");
703 return ATA_DEV_ATAPI;
706 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
707 DPRINTK("found PMP device by sig\n");
711 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
712 printk("ata: SEMB device ignored\n");
713 return ATA_DEV_SEMB_UNSUP; /* not yet */
716 DPRINTK("unknown device\n");
717 return ATA_DEV_UNKNOWN;
721 * ata_dev_try_classify - Parse returned ATA device signature
722 * @dev: ATA device to classify (starting at zero)
723 * @present: device seems present
724 * @r_err: Value of error register on completion
726 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
727 * an ATA/ATAPI-defined set of values is placed in the ATA
728 * shadow registers, indicating the results of device detection
731 * Select the ATA device, and read the values from the ATA shadow
732 * registers. Then parse according to the Error register value,
733 * and the spec-defined values examined by ata_dev_classify().
739 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
741 unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
744 struct ata_port *ap = dev->link->ap;
745 struct ata_taskfile tf;
749 ap->ops->dev_select(ap, dev->devno);
751 memset(&tf, 0, sizeof(tf));
753 ap->ops->tf_read(ap, &tf);
758 /* see if device passed diags: if master then continue and warn later */
759 if (err == 0 && dev->devno == 0)
760 /* diagnostic fail : do nothing _YET_ */
761 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
764 else if ((dev->devno == 0) && (err == 0x81))
769 /* determine if device is ATA or ATAPI */
770 class = ata_dev_classify(&tf);
772 if (class == ATA_DEV_UNKNOWN) {
773 /* If the device failed diagnostic, it's likely to
774 * have reported incorrect device signature too.
775 * Assume ATA device if the device seems present but
776 * device signature is invalid with diagnostic
779 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
782 class = ATA_DEV_NONE;
783 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
784 class = ATA_DEV_NONE;
790 * ata_id_string - Convert IDENTIFY DEVICE page into string
791 * @id: IDENTIFY DEVICE results we will examine
792 * @s: string into which data is output
793 * @ofs: offset into identify device page
794 * @len: length of string to return. must be an even number.
796 * The strings in the IDENTIFY DEVICE page are broken up into
797 * 16-bit chunks. Run through the string, and output each
798 * 8-bit chunk linearly, regardless of platform.
804 void ata_id_string(const u16 *id, unsigned char *s,
805 unsigned int ofs, unsigned int len)
824 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
825 * @id: IDENTIFY DEVICE results we will examine
826 * @s: string into which data is output
827 * @ofs: offset into identify device page
828 * @len: length of string to return. must be an odd number.
830 * This function is identical to ata_id_string except that it
831 * trims trailing spaces and terminates the resulting string with
832 * null. @len must be actual maximum length (even number) + 1.
837 void ata_id_c_string(const u16 *id, unsigned char *s,
838 unsigned int ofs, unsigned int len)
844 ata_id_string(id, s, ofs, len - 1);
846 p = s + strnlen(s, len - 1);
847 while (p > s && p[-1] == ' ')
852 static u64 ata_id_n_sectors(const u16 *id)
854 if (ata_id_has_lba(id)) {
855 if (ata_id_has_lba48(id))
856 return ata_id_u64(id, 100);
858 return ata_id_u32(id, 60);
860 if (ata_id_current_chs_valid(id))
861 return ata_id_u32(id, 57);
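/* Illustrative note, not in the original source: words 1, 3 and 6 of the
 * IDENTIFY data hold the default CHS geometry (cylinders, heads, sectors
 * per track), so their product below is the capacity reachable through
 * default CHS addressing.
 */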
863 return id[1] * id[3] * id[6];
867 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
871 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
872 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
873 sectors |= (tf->hob_lbal & 0xff) << 24;
874 sectors |= (tf->lbah & 0xff) << 16;
875 sectors |= (tf->lbam & 0xff) << 8;
876 sectors |= (tf->lbal & 0xff);
881 static u64 ata_tf_to_lba(struct ata_taskfile *tf)
885 sectors |= (tf->device & 0x0f) << 24;
886 sectors |= (tf->lbah & 0xff) << 16;
887 sectors |= (tf->lbam & 0xff) << 8;
888 sectors |= (tf->lbal & 0xff);
894 * ata_read_native_max_address - Read native max address
895 * @dev: target device
896 * @max_sectors: out parameter for the result native max address
898 * Perform an LBA48 or LBA28 native size query upon the device in
902 * 0 on success, -EACCES if command is aborted by the drive.
903 * -EIO on other errors.
905 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
907 unsigned int err_mask;
908 struct ata_taskfile tf;
909 int lba48 = ata_id_has_lba48(dev->id);
911 ata_tf_init(dev, &tf);
913 /* always clear all address registers */
914 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
917 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
918 tf.flags |= ATA_TFLAG_LBA48;
920 tf.command = ATA_CMD_READ_NATIVE_MAX;
922 tf.protocol |= ATA_PROT_NODATA;
923 tf.device |= ATA_LBA;
925 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
927 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
928 "max address (err_mask=0x%x)\n", err_mask);
929 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
935 *max_sectors = ata_tf_to_lba48(&tf);
937 *max_sectors = ata_tf_to_lba(&tf);
938 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
944 * ata_set_max_sectors - Set max sectors
945 * @dev: target device
946 * @new_sectors: new max sectors value to set for the device
948 * Set max sectors of @dev to @new_sectors.
951 * 0 on success, -EACCES if command is aborted or denied (due to
952 * previous non-volatile SET_MAX) by the drive. -EIO on other
955 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
957 unsigned int err_mask;
958 struct ata_taskfile tf;
959 int lba48 = ata_id_has_lba48(dev->id);
963 ata_tf_init(dev, &tf);
965 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
968 tf.command = ATA_CMD_SET_MAX_EXT;
969 tf.flags |= ATA_TFLAG_LBA48;
971 tf.hob_lbal = (new_sectors >> 24) & 0xff;
972 tf.hob_lbam = (new_sectors >> 32) & 0xff;
973 tf.hob_lbah = (new_sectors >> 40) & 0xff;
975 tf.command = ATA_CMD_SET_MAX;
977 tf.device |= (new_sectors >> 24) & 0xf;
980 tf.protocol |= ATA_PROT_NODATA;
981 tf.device |= ATA_LBA;
983 tf.lbal = (new_sectors >> 0) & 0xff;
984 tf.lbam = (new_sectors >> 8) & 0xff;
985 tf.lbah = (new_sectors >> 16) & 0xff;
987 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
989 ata_dev_printk(dev, KERN_WARNING, "failed to set "
990 "max address (err_mask=0x%x)\n", err_mask);
991 if (err_mask == AC_ERR_DEV &&
992 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1001 * ata_hpa_resize - Resize a device with an HPA set
1002 * @dev: Device to resize
1004 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1005 * it if required to the full size of the media. The caller must check
1006 * the drive has the HPA feature set enabled.
1009 * 0 on success, -errno on failure.
1011 static int ata_hpa_resize(struct ata_device *dev)
1013 struct ata_eh_context *ehc = &dev->link->eh_context;
1014 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1015 u64 sectors = ata_id_n_sectors(dev->id);
1019 /* do we need to do it? */
1020 if (dev->class != ATA_DEV_ATA ||
1021 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1022 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1025 /* read native max address */
1026 rc = ata_read_native_max_address(dev, &native_sectors);
1028 /* If HPA isn't going to be unlocked, skip HPA
1029 * resizing from the next try.
1031 if (!ata_ignore_hpa) {
1032 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1033 "broken, will skip HPA handling\n");
1034 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1036 /* we can continue if device aborted the command */
1044 /* nothing to do? */
1045 if (native_sectors <= sectors || !ata_ignore_hpa) {
1046 if (!print_info || native_sectors == sectors)
1049 if (native_sectors > sectors)
1050 ata_dev_printk(dev, KERN_INFO,
1051 "HPA detected: current %llu, native %llu\n",
1052 (unsigned long long)sectors,
1053 (unsigned long long)native_sectors);
1054 else if (native_sectors < sectors)
1055 ata_dev_printk(dev, KERN_WARNING,
1056 "native sectors (%llu) is smaller than "
1058 (unsigned long long)native_sectors,
1059 (unsigned long long)sectors);
1063 /* let's unlock HPA */
1064 rc = ata_set_max_sectors(dev, native_sectors);
1065 if (rc == -EACCES) {
1066 /* if device aborted the command, skip HPA resizing */
1067 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1068 "(%llu -> %llu), skipping HPA handling\n",
1069 (unsigned long long)sectors,
1070 (unsigned long long)native_sectors);
1071 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1076 /* re-read IDENTIFY data */
1077 rc = ata_dev_reread_id(dev, 0);
1079 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1080 "data after HPA resizing\n");
1085 u64 new_sectors = ata_id_n_sectors(dev->id);
1086 ata_dev_printk(dev, KERN_INFO,
1087 "HPA unlocked: %llu -> %llu, native %llu\n",
1088 (unsigned long long)sectors,
1089 (unsigned long long)new_sectors,
1090 (unsigned long long)native_sectors);
1097 * ata_id_to_dma_mode - Identify DMA mode from id block
1098 * @dev: device to identify
1099 * @unknown: mode to assume if we cannot tell
1101 * Set up the timing values for the device based upon the identify
1102 * reported values for the DMA mode. This function is used by drivers
1103 * which rely upon firmware configured modes, but wish to report the
1104 * mode correctly when possible.
1106 * In addition we emit similarly formatted messages to the default
1107 * ata_dev_set_mode handler, in order to provide consistency of
1111 void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1116 /* Pack the DMA modes */
1117 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1118 if (dev->id[53] & 0x04)
1119 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1121 /* Select the mode in use */
1122 mode = ata_xfer_mask2mode(mask);
1125 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1126 ata_mode_string(mask));
1128 /* SWDMA perhaps ? */
1130 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1133 /* Configure the device reporting */
1134 dev->xfer_mode = mode;
1135 dev->xfer_shift = ata_xfer_mode2shift(mode);
1139 * ata_noop_dev_select - Select device 0/1 on ATA bus
1140 * @ap: ATA channel to manipulate
1141 * @device: ATA device (numbered from zero) to select
1143 * This function intentionally does nothing.
1145 * May be used as the dev_select() entry in ata_port_operations.
1150 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1156 * ata_std_dev_select - Select device 0/1 on ATA bus
1157 * @ap: ATA channel to manipulate
1158 * @device: ATA device (numbered from zero) to select
1160 * Use the method defined in the ATA specification to
1161 * make either device 0, or device 1, active on the
1162 * ATA channel. Works with both PIO and MMIO.
1164 * May be used as the dev_select() entry in ata_port_operations.
1170 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1175 tmp = ATA_DEVICE_OBS;
1177 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1179 iowrite8(tmp, ap->ioaddr.device_addr);
1180 ata_pause(ap); /* needed; also flushes, for mmio */
1184 * ata_dev_select - Select device 0/1 on ATA bus
1185 * @ap: ATA channel to manipulate
1186 * @device: ATA device (numbered from zero) to select
1187 * @wait: non-zero to wait for Status register BSY bit to clear
1188 * @can_sleep: non-zero if context allows sleeping
1190 * Use the method defined in the ATA specification to
1191 * make either device 0, or device 1, active on the
1194 * This is a high-level version of ata_std_dev_select(),
1195 * which additionally provides the services of inserting
1196 * the proper pauses and status polling, where needed.
1202 void ata_dev_select(struct ata_port *ap, unsigned int device,
1203 unsigned int wait, unsigned int can_sleep)
1205 if (ata_msg_probe(ap))
1206 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1207 "device %u, wait %u\n", device, wait);
1212 ap->ops->dev_select(ap, device);
1215 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1222 * ata_dump_id - IDENTIFY DEVICE info debugging output
1223 * @id: IDENTIFY DEVICE page to dump
1225 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1232 static inline void ata_dump_id(const u16 *id)
1234 DPRINTK("49==0x%04x "
1244 DPRINTK("80==0x%04x "
1254 DPRINTK("88==0x%04x "
1261 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1262 * @id: IDENTIFY data to compute xfer mask from
1264 * Compute the xfermask for this device. This is not as trivial
1265 * as it seems if we must consider early devices correctly.
1267 * FIXME: pre IDE drive timing (do we care ?).
1275 static unsigned int ata_id_xfermask(const u16 *id)
1277 unsigned int pio_mask, mwdma_mask, udma_mask;
1279 /* Usual case. Word 53 indicates word 64 is valid */
1280 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1281 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1285 /* If word 64 isn't valid then Word 51 high byte holds
1286 * the PIO timing number for the maximum. Turn it into
1289 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1290 if (mode < 5) /* Valid PIO range */
1291 pio_mask = (2 << mode) - 1;
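/* Illustrative example, not in the original source: an old-style maximum
 * PIO mode of 2 in the high byte of word 51 yields (2 << 2) - 1 = 0x07
 * here, i.e. a mask covering PIO0-2.
 */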
1295 /* But wait.. there's more. Design your standards by
1296 * committee and you too can get a free iordy field to
1297 * process. However it's the speeds, not the modes, that
1298 * are supported... Note drivers using the timing API
1299 * will get this right anyway
1303 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1305 if (ata_id_is_cfa(id)) {
1307 * Process compact flash extended modes
1309 int pio = id[163] & 0x7;
1310 int dma = (id[163] >> 3) & 7;
1313 pio_mask |= (1 << 5);
1315 pio_mask |= (1 << 6);
1317 mwdma_mask |= (1 << 3);
1319 mwdma_mask |= (1 << 4);
1323 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1324 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1326 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1330 * ata_port_queue_task - Queue port_task
1331 * @ap: The ata_port to queue port_task for
1332 * @fn: workqueue function to be scheduled
1333 * @data: data for @fn to use
1334 * @delay: delay time for workqueue function
1336 * Schedule @fn(@data) for execution after @delay jiffies using
1337 * port_task. There is one port_task per port and it's the
1338 * user's (low level driver's) responsibility to make sure that only
1339 * one task is active at any given time.
1341 * libata core layer takes care of synchronization between
1342 * port_task and EH. ata_port_queue_task() may be ignored for EH
1346 * Inherited from caller.
1348 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1349 unsigned long delay)
1351 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1352 ap->port_task_data = data;
1354 /* may fail if ata_port_flush_task() in progress */
1355 queue_delayed_work(ata_wq, &ap->port_task, delay);
1359 * ata_port_flush_task - Flush port_task
1360 * @ap: The ata_port to flush port_task for
1362 * After this function completes, port_task is guaranteed not to
1363 * be running or scheduled.
1366 * Kernel thread context (may sleep)
1368 void ata_port_flush_task(struct ata_port *ap)
1372 cancel_rearming_delayed_work(&ap->port_task);
1374 if (ata_msg_ctl(ap))
1375 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1378 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1380 struct completion *waiting = qc->private_data;
1386 * ata_exec_internal_sg - execute libata internal command
1387 * @dev: Device to which the command is sent
1388 * @tf: Taskfile registers for the command and the result
1389 * @cdb: CDB for packet command
1390 * @dma_dir: Data transfer direction of the command
1391 * @sg: sg list for the data buffer of the command
1392 * @n_elem: Number of sg entries
1394 * Executes libata internal command with timeout. @tf contains
1395 * command on entry and result on return. Timeout and error
1396 * conditions are reported via return value. No recovery action
1397 * is taken after a command times out. It's the caller's duty to
1398 * clean up after timeout.
1401 * None. Should be called with kernel context, might sleep.
1404 * Zero on success, AC_ERR_* mask on failure
1406 unsigned ata_exec_internal_sg(struct ata_device *dev,
1407 struct ata_taskfile *tf, const u8 *cdb,
1408 int dma_dir, struct scatterlist *sg,
1409 unsigned int n_elem)
1411 struct ata_link *link = dev->link;
1412 struct ata_port *ap = link->ap;
1413 u8 command = tf->command;
1414 struct ata_queued_cmd *qc;
1415 unsigned int tag, preempted_tag;
1416 u32 preempted_sactive, preempted_qc_active;
1417 int preempted_nr_active_links;
1418 DECLARE_COMPLETION_ONSTACK(wait);
1419 unsigned long flags;
1420 unsigned int err_mask;
1423 spin_lock_irqsave(ap->lock, flags);
1425 /* no internal command while frozen */
1426 if (ap->pflags & ATA_PFLAG_FROZEN) {
1427 spin_unlock_irqrestore(ap->lock, flags);
1428 return AC_ERR_SYSTEM;
1431 /* initialize internal qc */
1433 /* XXX: Tag 0 is used for drivers with legacy EH as some
1434 * drivers choke if any other tag is given. This breaks
1435 * ata_tag_internal() test for those drivers. Don't use new
1436 * EH stuff without converting to it.
1438 if (ap->ops->error_handler)
1439 tag = ATA_TAG_INTERNAL;
1443 if (test_and_set_bit(tag, &ap->qc_allocated))
1445 qc = __ata_qc_from_tag(ap, tag);
1453 preempted_tag = link->active_tag;
1454 preempted_sactive = link->sactive;
1455 preempted_qc_active = ap->qc_active;
1456 preempted_nr_active_links = ap->nr_active_links;
1457 link->active_tag = ATA_TAG_POISON;
1460 ap->nr_active_links = 0;
1462 /* prepare & issue qc */
1465 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1466 qc->flags |= ATA_QCFLAG_RESULT_TF;
1467 qc->dma_dir = dma_dir;
1468 if (dma_dir != DMA_NONE) {
1469 unsigned int i, buflen = 0;
1471 for (i = 0; i < n_elem; i++)
1472 buflen += sg[i].length;
1474 ata_sg_init(qc, sg, n_elem);
1475 qc->nbytes = buflen;
1478 qc->private_data = &wait;
1479 qc->complete_fn = ata_qc_complete_internal;
1483 spin_unlock_irqrestore(ap->lock, flags);
1485 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1487 ata_port_flush_task(ap);
1490 spin_lock_irqsave(ap->lock, flags);
1492 /* We're racing with irq here. If we lose, the
1493 * following test prevents us from completing the qc
1494 * twice. If we win, the port is frozen and will be
1495 * cleaned up by ->post_internal_cmd().
1497 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1498 qc->err_mask |= AC_ERR_TIMEOUT;
1500 if (ap->ops->error_handler)
1501 ata_port_freeze(ap);
1503 ata_qc_complete(qc);
1505 if (ata_msg_warn(ap))
1506 ata_dev_printk(dev, KERN_WARNING,
1507 "qc timeout (cmd 0x%x)\n", command);
1510 spin_unlock_irqrestore(ap->lock, flags);
1513 /* do post_internal_cmd */
1514 if (ap->ops->post_internal_cmd)
1515 ap->ops->post_internal_cmd(qc);
1517 /* perform minimal error analysis */
1518 if (qc->flags & ATA_QCFLAG_FAILED) {
1519 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1520 qc->err_mask |= AC_ERR_DEV;
1523 qc->err_mask |= AC_ERR_OTHER;
1525 if (qc->err_mask & ~AC_ERR_OTHER)
1526 qc->err_mask &= ~AC_ERR_OTHER;
1530 spin_lock_irqsave(ap->lock, flags);
1532 *tf = qc->result_tf;
1533 err_mask = qc->err_mask;
1536 link->active_tag = preempted_tag;
1537 link->sactive = preempted_sactive;
1538 ap->qc_active = preempted_qc_active;
1539 ap->nr_active_links = preempted_nr_active_links;
1541 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1542 * Until those drivers are fixed, we detect the condition
1543 * here, fail the command with AC_ERR_SYSTEM and reenable the
1546 * Note that this doesn't change any behavior as internal
1547 * command failure results in disabling the device in the
1548 * higher layer for LLDDs without new reset/EH callbacks.
1550 * Kill the following code as soon as those drivers are fixed.
1552 if (ap->flags & ATA_FLAG_DISABLED) {
1553 err_mask |= AC_ERR_SYSTEM;
1557 spin_unlock_irqrestore(ap->lock, flags);
1563 * ata_exec_internal - execute libata internal command
1564 * @dev: Device to which the command is sent
1565 * @tf: Taskfile registers for the command and the result
1566 * @cdb: CDB for packet command
1567 * @dma_dir: Data transfer direction of the command
1568 * @buf: Data buffer of the command
1569 * @buflen: Length of data buffer
1571 * Wrapper around ata_exec_internal_sg() which takes simple
1572 * buffer instead of sg list.
1575 * None. Should be called with kernel context, might sleep.
1578 * Zero on success, AC_ERR_* mask on failure
1580 unsigned ata_exec_internal(struct ata_device *dev,
1581 struct ata_taskfile *tf, const u8 *cdb,
1582 int dma_dir, void *buf, unsigned int buflen)
1584 struct scatterlist *psg = NULL, sg;
1585 unsigned int n_elem = 0;
1587 if (dma_dir != DMA_NONE) {
1589 sg_init_one(&sg, buf, buflen);
1594 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
1598 * ata_do_simple_cmd - execute simple internal command
1599 * @dev: Device to which the command is sent
1600 * @cmd: Opcode to execute
1602 * Execute a 'simple' command that consists only of the opcode
1603 * 'cmd' itself, without filling any other registers
1606 * Kernel thread context (may sleep).
1609 * Zero on success, AC_ERR_* mask on failure
1611 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1613 struct ata_taskfile tf;
1615 ata_tf_init(dev, &tf);
1618 tf.flags |= ATA_TFLAG_DEVICE;
1619 tf.protocol = ATA_PROT_NODATA;
1621 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1625 * ata_pio_need_iordy - check if iordy needed
1628 * Check if the current speed of the device requires IORDY. Used
1629 * by various controllers for chip configuration.
1632 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1634 /* Controller doesn't support IORDY. Probably a pointless check
1635 as the caller should know this */
1636 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1638 /* For PIO3 and higher, IORDY is mandatory */
1639 if (adev->pio_mode > XFER_PIO_2)
1641 /* We turn it on when possible */
1642 if (ata_id_has_iordy(adev->id))
1648 * ata_pio_mask_no_iordy - Return the non IORDY mask
1651 * Compute the highest mode possible if we are not using iordy. Return
1652 * -1 if no iordy mode is available.
1655 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1657 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1658 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1659 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1660 /* Is the speed faster than the drive allows non IORDY ? */
1662 /* This is cycle times not frequency - watch the logic! */
1663 if (pio > 240) /* PIO2 is 240nS per cycle */
1664 return 3 << ATA_SHIFT_PIO;
1665 return 7 << ATA_SHIFT_PIO;
1668 return 3 << ATA_SHIFT_PIO;
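/* Illustrative note, not in the original source: 3 << ATA_SHIFT_PIO is the
 * PIO0-1 bitmap and 7 << ATA_SHIFT_PIO the PIO0-2 bitmap, so the function
 * reports PIO2 as the fastest non-IORDY mode unless the drive's minimum
 * non-IORDY cycle time is slower than PIO2's 240 ns.
 */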
1672 * ata_dev_read_id - Read ID data from the specified device
1673 * @dev: target device
1674 * @p_class: pointer to class of the target device (may be changed)
1675 * @flags: ATA_READID_* flags
1676 * @id: buffer to read IDENTIFY data into
1678 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1679 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1680 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1681 * for pre-ATA4 drives.
1683 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1684 * now we abort if we hit that case.
1687 * Kernel thread context (may sleep)
1690 * 0 on success, -errno otherwise.
1692 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1693 unsigned int flags, u16 *id)
1695 struct ata_port *ap = dev->link->ap;
1696 unsigned int class = *p_class;
1697 struct ata_taskfile tf;
1698 unsigned int err_mask = 0;
1700 int may_fallback = 1, tried_spinup = 0;
1703 if (ata_msg_ctl(ap))
1704 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1706 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1708 ata_tf_init(dev, &tf);
1712 tf.command = ATA_CMD_ID_ATA;
1715 tf.command = ATA_CMD_ID_ATAPI;
1719 reason = "unsupported class";
1723 tf.protocol = ATA_PROT_PIO;
1725 /* Some devices choke if TF registers contain garbage. Make
1726 * sure those are properly initialized.
1728 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1730 /* Device presence detection is unreliable on some
1731 * controllers. Always poll IDENTIFY if available.
1733 tf.flags |= ATA_TFLAG_POLLING;
1735 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1736 id, sizeof(id[0]) * ATA_ID_WORDS);
1738 if (err_mask & AC_ERR_NODEV_HINT) {
1739 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1740 ap->print_id, dev->devno);
1744 /* Device or controller might have reported the wrong
1745 * device class. Give a shot at the other IDENTIFY if
1746 * the current one is aborted by the device.
1749 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1752 if (class == ATA_DEV_ATA)
1753 class = ATA_DEV_ATAPI;
1755 class = ATA_DEV_ATA;
1760 reason = "I/O error";
1764 /* Falling back doesn't make sense if ID data was read
1765 * successfully at least once.
1769 swap_buf_le16(id, ATA_ID_WORDS);
1773 reason = "device reports invalid type";
1775 if (class == ATA_DEV_ATA) {
1776 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1779 if (ata_id_is_ata(id))
1783 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1786 * Drive powered-up in standby mode, and requires a specific
1787 * SET_FEATURES spin-up subcommand before it will accept
1788 * anything other than the original IDENTIFY command.
1790 ata_tf_init(dev, &tf);
1791 tf.command = ATA_CMD_SET_FEATURES;
1792 tf.feature = SETFEATURES_SPINUP;
1793 tf.protocol = ATA_PROT_NODATA;
1794 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1795 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1796 if (err_mask && id[2] != 0x738c) {
1798 reason = "SPINUP failed";
1802 * If the drive initially returned incomplete IDENTIFY info,
1803 * we now must reissue the IDENTIFY command.
1805 if (id[2] == 0x37c8)
1809 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1811 * The exact sequence expected by certain pre-ATA4 drives is:
1813 * IDENTIFY (optional in early ATA)
1814 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1816 * Some drives were very specific about that exact sequence.
1818 * Note that ATA4 says lba is mandatory so the second check
1819 * should never trigger.
1821 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1822 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1825 reason = "INIT_DEV_PARAMS failed";
1829 /* current CHS translation info (id[53-58]) might be
1830 * changed. Reread the IDENTIFY device info.
1832 flags &= ~ATA_READID_POSTRESET;
1842 if (ata_msg_warn(ap))
1843 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1844 "(%s, err_mask=0x%x)\n", reason, err_mask);
1848 static inline u8 ata_dev_knobble(struct ata_device *dev)
1850 struct ata_port *ap = dev->link->ap;
1851 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1854 static void ata_dev_config_ncq(struct ata_device *dev,
1855 char *desc, size_t desc_sz)
1857 struct ata_port *ap = dev->link->ap;
1858 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1860 if (!ata_id_has_ncq(dev->id)) {
1864 if (dev->horkage & ATA_HORKAGE_NONCQ) {
1865 snprintf(desc, desc_sz, "NCQ (not used)");
1868 if (ap->flags & ATA_FLAG_NCQ) {
1869 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1870 dev->flags |= ATA_DFLAG_NCQ;
1873 if (hdepth >= ddepth)
1874 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1876 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1880 * ata_dev_configure - Configure the specified ATA/ATAPI device
1881 * @dev: Target device to configure
1883 * Configure @dev according to @dev->id. Generic and low-level
1884 * driver specific fixups are also applied.
1887 * Kernel thread context (may sleep)
1890 * 0 on success, -errno otherwise
1892 int ata_dev_configure(struct ata_device *dev)
1894 struct ata_port *ap = dev->link->ap;
1895 struct ata_eh_context *ehc = &dev->link->eh_context;
1896 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1897 const u16 *id = dev->id;
1898 unsigned int xfer_mask;
1899 char revbuf[7]; /* XYZ-99\0 */
1900 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1901 char modelbuf[ATA_ID_PROD_LEN+1];
1904 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1905 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1910 if (ata_msg_probe(ap))
1911 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1914 dev->horkage |= ata_dev_blacklisted(dev);
1916 /* let ACPI work its magic */
1917 rc = ata_acpi_on_devcfg(dev);
1921 /* massage HPA, do it early as it might change IDENTIFY data */
1922 rc = ata_hpa_resize(dev);
1926 /* print device capabilities */
1927 if (ata_msg_probe(ap))
1928 ata_dev_printk(dev, KERN_DEBUG,
1929 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1930 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1932 id[49], id[82], id[83], id[84],
1933 id[85], id[86], id[87], id[88]);
1935 /* initialize to-be-configured parameters */
1936 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1937 dev->max_sectors = 0;
1945 * common ATA, ATAPI feature tests
1948 /* find max transfer mode; for printk only */
1949 xfer_mask = ata_id_xfermask(id);
1951 if (ata_msg_probe(ap))
1954 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1955 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1958 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1961 /* ATA-specific feature tests */
1962 if (dev->class == ATA_DEV_ATA) {
1963 if (ata_id_is_cfa(id)) {
1964 if (id[162] & 1) /* CPRM may make this media unusable */
1965 ata_dev_printk(dev, KERN_WARNING,
1966 "supports DRM functions and may "
1967 "not be fully accessable.\n");
1968 snprintf(revbuf, 7, "CFA");
1971 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1973 dev->n_sectors = ata_id_n_sectors(id);
1975 if (dev->id[59] & 0x100)
1976 dev->multi_count = dev->id[59] & 0xff;
1978 if (ata_id_has_lba(id)) {
1979 const char *lba_desc;
1983 dev->flags |= ATA_DFLAG_LBA;
1984 if (ata_id_has_lba48(id)) {
1985 dev->flags |= ATA_DFLAG_LBA48;
1988 if (dev->n_sectors >= (1UL << 28) &&
1989 ata_id_has_flush_ext(id))
1990 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1994 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1996 /* print device info to dmesg */
1997 if (ata_msg_drv(ap) && print_info) {
1998 ata_dev_printk(dev, KERN_INFO,
1999 "%s: %s, %s, max %s\n",
2000 revbuf, modelbuf, fwrevbuf,
2001 ata_mode_string(xfer_mask));
2002 ata_dev_printk(dev, KERN_INFO,
2003 "%Lu sectors, multi %u: %s %s\n",
2004 (unsigned long long)dev->n_sectors,
2005 dev->multi_count, lba_desc, ncq_desc);
2010 /* Default translation */
2011 dev->cylinders = id[1];
2013 dev->sectors = id[6];
2015 if (ata_id_current_chs_valid(id)) {
2016 /* Current CHS translation is valid. */
2017 dev->cylinders = id[54];
2018 dev->heads = id[55];
2019 dev->sectors = id[56];
2022 /* print device info to dmesg */
2023 if (ata_msg_drv(ap) && print_info) {
2024 ata_dev_printk(dev, KERN_INFO,
2025 "%s: %s, %s, max %s\n",
2026 revbuf, modelbuf, fwrevbuf,
2027 ata_mode_string(xfer_mask));
2028 ata_dev_printk(dev, KERN_INFO,
2029 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2030 (unsigned long long)dev->n_sectors,
2031 dev->multi_count, dev->cylinders,
2032 dev->heads, dev->sectors);
2039 /* ATAPI-specific feature tests */
2040 else if (dev->class == ATA_DEV_ATAPI) {
2041 const char *cdb_intr_string = "";
2042 const char *atapi_an_string = "";
2045 rc = atapi_cdb_len(id);
2046 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2047 if (ata_msg_warn(ap))
2048 ata_dev_printk(dev, KERN_WARNING,
2049 "unsupported CDB len\n");
2053 dev->cdb_len = (unsigned int) rc;
2055 /* Enable ATAPI AN if both the host and device have
2056 * the support. If PMP is attached, SNTF is required
2057 * to enable ATAPI AN to discern between PHY status
2058 * changed notifications and ATAPI ANs.
2060 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2061 (!ap->nr_pmp_links ||
2062 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2063 unsigned int err_mask;
2065 /* issue SET feature command to turn this on */
2066 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2068 ata_dev_printk(dev, KERN_ERR,
2069 "failed to enable ATAPI AN "
2070 "(err_mask=0x%x)\n", err_mask);
2072 dev->flags |= ATA_DFLAG_AN;
2073 atapi_an_string = ", ATAPI AN";
2077 if (ata_id_cdb_intr(dev->id)) {
2078 dev->flags |= ATA_DFLAG_CDB_INTR;
2079 cdb_intr_string = ", CDB intr";
2082 /* print device info to dmesg */
2083 if (ata_msg_drv(ap) && print_info)
2084 ata_dev_printk(dev, KERN_INFO,
2085 "ATAPI: %s, %s, max %s%s%s\n",
2087 ata_mode_string(xfer_mask),
2088 cdb_intr_string, atapi_an_string);
2091 /* determine max_sectors */
2092 dev->max_sectors = ATA_MAX_SECTORS;
2093 if (dev->flags & ATA_DFLAG_LBA48)
2094 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2096 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2097 /* Let the user know. We don't want to disallow opens for
2098 rescue purposes, or in case the vendor is just a blithering
2101 ata_dev_printk(dev, KERN_WARNING,
2102 "Drive reports diagnostics failure. This may indicate a drive\n");
2103 ata_dev_printk(dev, KERN_WARNING,
2104 "fault or invalid emulation. Contact drive vendor for information.\n");
2108 /* limit bridge transfers to udma5, 200 sectors */
2109 if (ata_dev_knobble(dev)) {
2110 if (ata_msg_drv(ap) && print_info)
2111 ata_dev_printk(dev, KERN_INFO,
2112 "applying bridge limits\n");
2113 dev->udma_mask &= ATA_UDMA5;
2114 dev->max_sectors = ATA_MAX_SECTORS;
2117 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2118 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2121 if (ap->ops->dev_config)
2122 ap->ops->dev_config(dev);
2124 if (ata_msg_probe(ap))
2125 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2126 __FUNCTION__, ata_chk_status(ap));
2130 if (ata_msg_probe(ap))
2131 ata_dev_printk(dev, KERN_DEBUG,
2132 "%s: EXIT, err\n", __FUNCTION__);
2137 * ata_cable_40wire - return 40 wire cable type
2140 * Helper method for drivers which want to hardwire 40 wire cable
2144 int ata_cable_40wire(struct ata_port *ap)
2146 return ATA_CBL_PATA40;
2150 * ata_cable_80wire - return 80 wire cable type
2153 * Helper method for drivers which want to hardwire 80 wire cable
2157 int ata_cable_80wire(struct ata_port *ap)
2159 return ATA_CBL_PATA80;
2163 * ata_cable_unknown - return unknown PATA cable.
2166 * Helper method for drivers which have no PATA cable detection.
2169 int ata_cable_unknown(struct ata_port *ap)
2171 return ATA_CBL_PATA_UNK;
2175 * ata_cable_sata - return SATA cable type
2178 * Helper method for drivers which have SATA cables
2181 int ata_cable_sata(struct ata_port *ap)
2183 return ATA_CBL_SATA;
2187 * ata_bus_probe - Reset and probe ATA bus
2190 * Master ATA bus probing function. Initiates a hardware-dependent
2191 * bus reset, then attempts to identify any devices found on
2195 * PCI/etc. bus probe sem.
2198 * Zero on success, negative errno otherwise.
2201 int ata_bus_probe(struct ata_port *ap)
2203 unsigned int classes[ATA_MAX_DEVICES];
2204 int tries[ATA_MAX_DEVICES];
2206 struct ata_device *dev;
2210 ata_link_for_each_dev(dev, &ap->link)
2211 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2214 /* reset and determine device classes */
2215 ap->ops->phy_reset(ap);
2217 ata_link_for_each_dev(dev, &ap->link) {
2218 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2219 dev->class != ATA_DEV_UNKNOWN)
2220 classes[dev->devno] = dev->class;
2222 classes[dev->devno] = ATA_DEV_NONE;
2224 dev->class = ATA_DEV_UNKNOWN;
2229 /* after the reset the device state is PIO 0 and the controller
2230 state is undefined. Record the mode */
2232 ata_link_for_each_dev(dev, &ap->link)
2233 dev->pio_mode = XFER_PIO_0;
2235 /* read IDENTIFY page and configure devices. We have to do the identify
2236 specific sequence bass-ackwards so that PDIAG- is released by
2239 ata_link_for_each_dev(dev, &ap->link) {
2240 if (tries[dev->devno])
2241 dev->class = classes[dev->devno];
2243 if (!ata_dev_enabled(dev))
2246 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2252 /* Now ask for the cable type as PDIAG- should have been released */
2253 if (ap->ops->cable_detect)
2254 ap->cbl = ap->ops->cable_detect(ap);
2256 /* We may have SATA bridge glue hiding here irrespective of the
2257 reported cable types and sensed types */
2258 ata_link_for_each_dev(dev, &ap->link) {
2259 if (!ata_dev_enabled(dev))
2261 /* SATA drives indicate we have a bridge. We don't know which
2262 end of the link the bridge is on, which is a problem */
2263 if (ata_id_is_sata(dev->id))
2264 ap->cbl = ATA_CBL_SATA;
2267 /* After the identify sequence we can now set up the devices. We do
2268 this in the normal order so that the user doesn't get confused */
2270 ata_link_for_each_dev(dev, &ap->link) {
2271 if (!ata_dev_enabled(dev))
2274 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2275 rc = ata_dev_configure(dev);
2276 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2281 /* configure transfer mode */
2282 rc = ata_set_mode(&ap->link, &dev);
2286 ata_link_for_each_dev(dev, &ap->link)
2287 if (ata_dev_enabled(dev))
2290 /* no device present, disable port */
2291 ata_port_disable(ap);
2295 tries[dev->devno]--;
2299 /* eeek, something went very wrong, give up */
2300 tries[dev->devno] = 0;
2304 /* give it just one more chance */
2305 tries[dev->devno] = min(tries[dev->devno], 1);
2307 if (tries[dev->devno] == 1) {
2308 /* This is the last chance, better to slow
2309 * down than lose it.
2311 sata_down_spd_limit(&ap->link);
2312 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2316 if (!tries[dev->devno])
2317 ata_dev_disable(dev);
2323 * ata_port_probe - Mark port as enabled
2324 * @ap: Port for which we indicate enablement
2326 * Modify @ap data structure such that the system
2327 * thinks that the entire port is enabled.
2329 * LOCKING: host lock, or some other form of
2333 void ata_port_probe(struct ata_port *ap)
2335 ap->flags &= ~ATA_FLAG_DISABLED;
2339 * sata_print_link_status - Print SATA link status
2340 * @link: SATA link to printk link status about
2342 * This function prints link speed and status of a SATA link.
2347 void sata_print_link_status(struct ata_link *link)
2349 u32 sstatus, scontrol, tmp;
2351 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2353 sata_scr_read(link, SCR_CONTROL, &scontrol);
2355 if (ata_link_online(link)) {
2356 tmp = (sstatus >> 4) & 0xf;
2357 ata_link_printk(link, KERN_INFO,
2358 "SATA link up %s (SStatus %X SControl %X)\n",
2359 sata_spd_string(tmp), sstatus, scontrol);
2361 ata_link_printk(link, KERN_INFO,
2362 "SATA link down (SStatus %X SControl %X)\n",
2368 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2369 * @ap: SATA port associated with target SATA PHY.
2371 * This function issues commands to standard SATA Sxxx
2372 * PHY registers, to wake up the phy (and device), and
2373 * clear any reset condition.
2376 * PCI/etc. bus probe sem.
2379 void __sata_phy_reset(struct ata_port *ap)
2381 struct ata_link *link = &ap->link;
2382 unsigned long timeout = jiffies + (HZ * 5);
2385 if (ap->flags & ATA_FLAG_SATA_RESET) {
2386 /* issue phy wake/reset */
2387 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2388 /* Couldn't find anything in SATA I/II specs, but
2389 * AHCI-1.1 10.4.2 says at least 1 ms. */
2392 /* phy wake/clear reset */
2393 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2395 /* wait for phy to become ready, if necessary */
2398 sata_scr_read(link, SCR_STATUS, &sstatus);
2399 if ((sstatus & 0xf) != 1)
2401 } while (time_before(jiffies, timeout));
2403 /* print link status */
2404 sata_print_link_status(link);
2406 /* TODO: phy layer with polling, timeouts, etc. */
2407 if (!ata_link_offline(link))
2410 ata_port_disable(ap);
2412 if (ap->flags & ATA_FLAG_DISABLED)
2415 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2416 ata_port_disable(ap);
2420 ap->cbl = ATA_CBL_SATA;
2424 * sata_phy_reset - Reset SATA bus.
2425 * @ap: SATA port associated with target SATA PHY.
2427 * This function resets the SATA bus, and then probes
2428 * the bus for devices.
2431 * PCI/etc. bus probe sem.
2434 void sata_phy_reset(struct ata_port *ap)
2436 __sata_phy_reset(ap);
2437 if (ap->flags & ATA_FLAG_DISABLED)
2443 * ata_dev_pair - return other device on cable
2446 * Obtain the other device on the same cable, or NULL if no
2447 * other device is present.
2450 struct ata_device *ata_dev_pair(struct ata_device *adev)
2452 struct ata_link *link = adev->link;
2453 struct ata_device *pair = &link->device[1 - adev->devno];
2454 if (!ata_dev_enabled(pair))
2460 * ata_port_disable - Disable port.
2461 * @ap: Port to be disabled.
2463 * Modify @ap data structure such that the system
2464 * thinks that the entire port is disabled, and should
2465 * never attempt to probe or communicate with devices
2468 * LOCKING: host lock, or some other form of
2472 void ata_port_disable(struct ata_port *ap)
2474 ap->link.device[0].class = ATA_DEV_NONE;
2475 ap->link.device[1].class = ATA_DEV_NONE;
2476 ap->flags |= ATA_FLAG_DISABLED;
2480 * sata_down_spd_limit - adjust SATA spd limit downward
2481 * @link: Link to adjust SATA spd limit for
2483 * Adjust SATA spd limit of @link downward. Note that this
2484 * function only adjusts the limit. The change must be applied
2485 * using sata_set_spd().
2488 * Inherited from caller.
2491 * 0 on success, negative errno on failure
2493 int sata_down_spd_limit(struct ata_link *link)
2495 u32 sstatus, spd, mask;
2498 if (!sata_scr_valid(link))
2501 /* If SCR can be read, use it to determine the current SPD.
2502 * If not, use cached value in link->sata_spd.
2504 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2506 spd = (sstatus >> 4) & 0xf;
2508 spd = link->sata_spd;
2510 mask = link->sata_spd_limit;
2514 /* unconditionally mask off the highest bit */
2515 highbit = fls(mask) - 1;
2516 mask &= ~(1 << highbit);
2518 /* Mask off all speeds higher than or equal to the current
2519 * one. Force 1.5Gbps if current SPD is not available.
2522 mask &= (1 << (spd - 1)) - 1;
2526 /* were we already at the bottom? */
2530 link->sata_spd_limit = mask;
2532 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2533 sata_spd_string(fls(mask)));
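/*
 * Worked example (illustrative): with sata_spd_limit == 0x3 (1.5 and
 * 3.0 Gbps allowed) and the link currently at 3.0 Gbps (spd == 2),
 * fls(0x3) - 1 == 1 drops the 3.0 Gbps bit and the "below current SPD"
 * mask (1 << (2 - 1)) - 1 == 0x1 leaves only 1.5 Gbps, so the warning
 * above reports a limit of 1.5 Gbps.
 */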
2538 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2542 if (link->sata_spd_limit == UINT_MAX)
2545 limit = fls(link->sata_spd_limit);
2547 spd = (*scontrol >> 4) & 0xf;
2548 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2550 return spd != limit;
2554 * sata_set_spd_needed - is SATA spd configuration needed
2555 * @link: Link in question
2557 * Test whether the spd limit in SControl matches
2558 * @link->sata_spd_limit. This function is used to determine
2559 * whether hardreset is necessary to apply SATA spd
2563 * Inherited from caller.
2566 * 1 if SATA spd configuration is needed, 0 otherwise.
2568 int sata_set_spd_needed(struct ata_link *link)
2572 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2575 return __sata_set_spd_needed(link, &scontrol);
2579 * sata_set_spd - set SATA spd according to spd limit
2580 * @link: Link to set SATA spd for
2582 * Set SATA spd of @link according to sata_spd_limit.
2585 * Inherited from caller.
2588 * 0 if spd doesn't need to be changed, 1 if spd has been
2589 * changed. Negative errno if SCR registers are inaccessible.
2591 int sata_set_spd(struct ata_link *link)
2596 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2599 if (!__sata_set_spd_needed(link, &scontrol))
2602 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2609 * This mode timing computation functionality is ported over from
2610 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2613 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2614 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2615 * for UDMA6, which is currently supported only by Maxtor drives.
2617 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
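/*
 * Reading the table below (assuming the struct ata_timing field order
 * used by ata_timing_quantize() further down): each row is
 * { mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma },
 * all values in nanoseconds.  UDMA rows carry only the UDMA cycle time
 * in the last column.
 */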
2620 static const struct ata_timing ata_timing[] = {
2622 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2623 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2624 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2625 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2627 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2628 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2629 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2630 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2631 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2633 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2635 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2636 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2637 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2639 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2640 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2641 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2643 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2644 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2645 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2646 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2648 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2649 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2650 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2652 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2657 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2658 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
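/*
 * ENOUGH() is a ceiling division; EZ() additionally keeps an
 * unspecified (zero) timing at zero.  Illustrative example, assuming
 * the caller passes T in the same sub-nanosecond unit as
 * t->setup * 1000: a 70 ns setup time quantized against T == 30000
 * (a 30 ns clock) gives EZ(70 * 1000, 30000) == 3 clock cycles.
 */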
2660 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2662 q->setup = EZ(t->setup * 1000, T);
2663 q->act8b = EZ(t->act8b * 1000, T);
2664 q->rec8b = EZ(t->rec8b * 1000, T);
2665 q->cyc8b = EZ(t->cyc8b * 1000, T);
2666 q->active = EZ(t->active * 1000, T);
2667 q->recover = EZ(t->recover * 1000, T);
2668 q->cycle = EZ(t->cycle * 1000, T);
2669 q->udma = EZ(t->udma * 1000, UT);
2672 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2673 struct ata_timing *m, unsigned int what)
2675 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2676 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2677 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2678 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2679 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2680 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2681 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2682 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2685 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2687 const struct ata_timing *t;
2689 for (t = ata_timing; t->mode != speed; t++)
2690 if (t->mode == 0xFF)
2695 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2696 struct ata_timing *t, int T, int UT)
2698 const struct ata_timing *s;
2699 struct ata_timing p;
2705 if (!(s = ata_timing_find_mode(speed)))
2708 memcpy(t, s, sizeof(*s));
2711 * If the drive is an EIDE drive, it can tell us it needs extended
2712 * PIO/MW_DMA cycle timing.
2715 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2716 memset(&p, 0, sizeof(p));
2717 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2718 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2719 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2720 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2721 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2723 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2727 * Convert the timing to bus clock counts.
2730 ata_timing_quantize(t, t, T, UT);
2733 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2734 * S.M.A.R.T. and some other commands. We have to ensure that the
2735 * DMA cycle timing is no faster than the fastest PIO timing.
2738 if (speed > XFER_PIO_6) {
2739 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2740 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2744 * Lengthen active & recovery time so that cycle time is correct.
2747 if (t->act8b + t->rec8b < t->cyc8b) {
2748 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2749 t->rec8b = t->cyc8b - t->act8b;
2752 if (t->active + t->recover < t->cycle) {
2753 t->active += (t->cycle - (t->active + t->recover)) / 2;
2754 t->recover = t->cycle - t->active;
2757 /* In a few cases quantisation may produce enough errors to
2758 leave t->cycle too low for the sum of active and recovery;
2759 if so, we must correct this. */
2760 if (t->active + t->recover > t->cycle)
2761 t->cycle = t->active + t->recover;
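/*
 * Illustrative example of the lengthening above: act8b == 3,
 * rec8b == 4, cyc8b == 10 gives act8b += (10 - 7) / 2 == 1, so
 * act8b becomes 4 and rec8b becomes 10 - 4 == 6, making the 8-bit
 * active + recovery time fill the full cycle again.
 */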
2767 * ata_down_xfermask_limit - adjust dev xfer masks downward
2768 * @dev: Device to adjust xfer masks
2769 * @sel: ATA_DNXFER_* selector
2771 * Adjust xfer masks of @dev downward. Note that this function
2772 * does not apply the change. Invoking ata_set_mode() afterwards
2773 * will apply the limit.
2776 * Inherited from caller.
2779 * 0 on success, negative errno on failure
2781 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2784 unsigned int orig_mask, xfer_mask;
2785 unsigned int pio_mask, mwdma_mask, udma_mask;
2788 quiet = !!(sel & ATA_DNXFER_QUIET);
2789 sel &= ~ATA_DNXFER_QUIET;
2791 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2794 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2797 case ATA_DNXFER_PIO:
2798 highbit = fls(pio_mask) - 1;
2799 pio_mask &= ~(1 << highbit);
2802 case ATA_DNXFER_DMA:
2804 highbit = fls(udma_mask) - 1;
2805 udma_mask &= ~(1 << highbit);
2808 } else if (mwdma_mask) {
2809 highbit = fls(mwdma_mask) - 1;
2810 mwdma_mask &= ~(1 << highbit);
2816 case ATA_DNXFER_40C:
2817 udma_mask &= ATA_UDMA_MASK_40C;
2820 case ATA_DNXFER_FORCE_PIO0:
2822 case ATA_DNXFER_FORCE_PIO:
2831 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2833 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2837 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2838 snprintf(buf, sizeof(buf), "%s:%s",
2839 ata_mode_string(xfer_mask),
2840 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2842 snprintf(buf, sizeof(buf), "%s",
2843 ata_mode_string(xfer_mask));
2845 ata_dev_printk(dev, KERN_WARNING,
2846 "limiting speed to %s\n", buf);
2849 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2855 static int ata_dev_set_mode(struct ata_device *dev)
2857 struct ata_eh_context *ehc = &dev->link->eh_context;
2858 unsigned int err_mask;
2861 dev->flags &= ~ATA_DFLAG_PIO;
2862 if (dev->xfer_shift == ATA_SHIFT_PIO)
2863 dev->flags |= ATA_DFLAG_PIO;
2865 err_mask = ata_dev_set_xfermode(dev);
2866 /* Old CFA may refuse this command, which is just fine */
2867 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2868 err_mask &= ~AC_ERR_DEV;
2869 /* Some very old devices and some bad newer ones fail any kind of
2870 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2871 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2872 dev->pio_mode <= XFER_PIO_2)
2873 err_mask &= ~AC_ERR_DEV;
2875 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2876 "(err_mask=0x%x)\n", err_mask);
2880 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2881 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
2882 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2886 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2887 dev->xfer_shift, (int)dev->xfer_mode);
2889 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2890 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2895 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2896 * @link: link on which timings will be programmed
2897 * @r_failed_dev: out parameter for the failed device
2899 * Standard implementation of the function used to tune and set
2900 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2901 * ata_dev_set_mode() fails, pointer to the failing device is
2902 * returned in @r_failed_dev.
2905 * PCI/etc. bus probe sem.
2908 * 0 on success, negative errno otherwise
2911 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2913 struct ata_port *ap = link->ap;
2914 struct ata_device *dev;
2915 int rc = 0, used_dma = 0, found = 0;
2917 /* step 1: calculate xfer_mask */
2918 ata_link_for_each_dev(dev, link) {
2919 unsigned int pio_mask, dma_mask;
2921 if (!ata_dev_enabled(dev))
2924 ata_dev_xfermask(dev);
2926 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2927 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2928 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2929 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2938 /* step 2: always set host PIO timings */
2939 ata_link_for_each_dev(dev, link) {
2940 if (!ata_dev_enabled(dev))
2943 if (!dev->pio_mode) {
2944 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2949 dev->xfer_mode = dev->pio_mode;
2950 dev->xfer_shift = ATA_SHIFT_PIO;
2951 if (ap->ops->set_piomode)
2952 ap->ops->set_piomode(ap, dev);
2955 /* step 3: set host DMA timings */
2956 ata_link_for_each_dev(dev, link) {
2957 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2960 dev->xfer_mode = dev->dma_mode;
2961 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2962 if (ap->ops->set_dmamode)
2963 ap->ops->set_dmamode(ap, dev);
2966 /* step 4: update devices' xfer mode */
2967 ata_link_for_each_dev(dev, link) {
2968 /* don't update suspended devices' xfer mode */
2969 if (!ata_dev_enabled(dev))
2972 rc = ata_dev_set_mode(dev);
2977 /* Record simplex status. If we selected DMA then the other
2978 * host channels are not permitted to do so.
2980 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2981 ap->host->simplex_claimed = ap;
2985 *r_failed_dev = dev;
2990 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2991 * @link: link on which timings will be programmed
2992 * @r_failed_dev: out parameter for the failed device
2994 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2995 * ata_set_mode() fails, pointer to the failing device is
2996 * returned in @r_failed_dev.
2999 * PCI/etc. bus probe sem.
3002 * 0 on success, negative errno otherwise
3004 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3006 struct ata_port *ap = link->ap;
3008 /* has private set_mode? */
3009 if (ap->ops->set_mode)
3010 return ap->ops->set_mode(link, r_failed_dev);
3011 return ata_do_set_mode(link, r_failed_dev);
3015 * ata_tf_to_host - issue ATA taskfile to host controller
3016 * @ap: port to which command is being issued
3017 * @tf: ATA taskfile register set
3019 * Issues ATA taskfile register set to ATA host controller,
3020 * with proper synchronization with interrupt handler and
3024 * spin_lock_irqsave(host lock)
3027 static inline void ata_tf_to_host(struct ata_port *ap,
3028 const struct ata_taskfile *tf)
3030 ap->ops->tf_load(ap, tf);
3031 ap->ops->exec_command(ap, tf);
3035 * ata_busy_sleep - sleep until BSY clears, or timeout
3036 * @ap: port containing status register to be polled
3037 * @tmout_pat: impatience timeout
3038 * @tmout: overall timeout
3040 * Sleep until ATA Status register bit BSY clears,
3041 * or a timeout occurs.
3044 * Kernel thread context (may sleep).
3047 * 0 on success, -errno otherwise.
3049 int ata_busy_sleep(struct ata_port *ap,
3050 unsigned long tmout_pat, unsigned long tmout)
3052 unsigned long timer_start, timeout;
3055 status = ata_busy_wait(ap, ATA_BUSY, 300);
3056 timer_start = jiffies;
3057 timeout = timer_start + tmout_pat;
3058 while (status != 0xff && (status & ATA_BUSY) &&
3059 time_before(jiffies, timeout)) {
3061 status = ata_busy_wait(ap, ATA_BUSY, 3);
3064 if (status != 0xff && (status & ATA_BUSY))
3065 ata_port_printk(ap, KERN_WARNING,
3066 "port is slow to respond, please be patient "
3067 "(Status 0x%x)\n", status);
3069 timeout = timer_start + tmout;
3070 while (status != 0xff && (status & ATA_BUSY) &&
3071 time_before(jiffies, timeout)) {
3073 status = ata_chk_status(ap);
3079 if (status & ATA_BUSY) {
3080 ata_port_printk(ap, KERN_ERR, "port failed to respond "
3081 "(%lu secs, Status 0x%x)\n",
3082 tmout / HZ, status);
3090 * ata_wait_ready - sleep until BSY clears, or timeout
3091 * @ap: port containing status register to be polled
3092 * @deadline: deadline jiffies for the operation
3094 * Sleep until ATA Status register bit BSY clears, or timeout
3098 * Kernel thread context (may sleep).
3101 * 0 on success, -errno otherwise.
3103 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3105 unsigned long start = jiffies;
3109 u8 status = ata_chk_status(ap);
3110 unsigned long now = jiffies;
3112 if (!(status & ATA_BUSY))
3114 if (!ata_link_online(&ap->link) && status == 0xff)
3116 if (time_after(now, deadline))
3119 if (!warned && time_after(now, start + 5 * HZ) &&
3120 (deadline - now > 3 * HZ)) {
3121 ata_port_printk(ap, KERN_WARNING,
3122 "port is slow to respond, please be patient "
3123 "(Status 0x%x)\n", status);
3131 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3132 unsigned long deadline)
3134 struct ata_ioports *ioaddr = &ap->ioaddr;
3135 unsigned int dev0 = devmask & (1 << 0);
3136 unsigned int dev1 = devmask & (1 << 1);
3139 /* if device 0 was found in ata_devchk, wait for its
3143 rc = ata_wait_ready(ap, deadline);
3151 /* if device 1 was found in ata_devchk, wait for register
3152 * access briefly, then wait for BSY to clear.
3157 ap->ops->dev_select(ap, 1);
3159 /* Wait for register access. Some ATAPI devices fail
3160 * to set nsect/lbal after reset, so don't waste too
3161 * much time on it. We're gonna wait for !BSY anyway.
3163 for (i = 0; i < 2; i++) {
3166 nsect = ioread8(ioaddr->nsect_addr);
3167 lbal = ioread8(ioaddr->lbal_addr);
3168 if ((nsect == 1) && (lbal == 1))
3170 msleep(50); /* give drive a breather */
3173 rc = ata_wait_ready(ap, deadline);
3181 /* is all this really necessary? */
3182 ap->ops->dev_select(ap, 0);
3184 ap->ops->dev_select(ap, 1);
3186 ap->ops->dev_select(ap, 0);
3191 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3192 unsigned long deadline)
3194 struct ata_ioports *ioaddr = &ap->ioaddr;
3196 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3198 /* software reset. causes dev0 to be selected */
3199 iowrite8(ap->ctl, ioaddr->ctl_addr);
3200 udelay(20); /* FIXME: flush */
3201 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3202 udelay(20); /* FIXME: flush */
3203 iowrite8(ap->ctl, ioaddr->ctl_addr);
3205 /* spec mandates ">= 2ms" before checking status.
3206 * We wait 150ms, because that was the magic delay used for
3207 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3208 * between writing the ATA command register and checking
3209 * status. Because waiting for "a while" before
3210 * checking status is fine, post SRST, we perform this magic
3211 * delay here as well.
3213 * The old drivers/ide code uses the 2 ms rule and then waits for ready
3217 /* Before we perform post reset processing we want to see if
3218 * the bus shows 0xFF because the odd clown forgets the D7
3219 * pulldown resistor.
3221 if (ata_check_status(ap) == 0xFF)
3224 return ata_bus_post_reset(ap, devmask, deadline);
3228 * ata_bus_reset - reset host port and associated ATA channel
3229 * @ap: port to reset
3231 * This is typically the first time we actually start issuing
3232 * commands to the ATA channel. We wait for BSY to clear, then
3233 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3234 * result. Determine what devices, if any, are on the channel
3235 * by looking at the device 0/1 error register. Look at the signature
3236 * stored in each device's taskfile registers, to determine if
3237 * the device is ATA or ATAPI.
3240 * PCI/etc. bus probe sem.
3241 * Obtains host lock.
3244 * Sets ATA_FLAG_DISABLED if bus reset fails.
3247 void ata_bus_reset(struct ata_port *ap)
3249 struct ata_device *device = ap->link.device;
3250 struct ata_ioports *ioaddr = &ap->ioaddr;
3251 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3253 unsigned int dev0, dev1 = 0, devmask = 0;
3256 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3258 /* determine if device 0/1 are present */
3259 if (ap->flags & ATA_FLAG_SATA_RESET)
3262 dev0 = ata_devchk(ap, 0);
3264 dev1 = ata_devchk(ap, 1);
3268 devmask |= (1 << 0);
3270 devmask |= (1 << 1);
3272 /* select device 0 again */
3273 ap->ops->dev_select(ap, 0);
3275 /* issue bus reset */
3276 if (ap->flags & ATA_FLAG_SRST) {
3277 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3278 if (rc && rc != -ENODEV)
3283 * determine by signature whether we have ATA or ATAPI devices
3285 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3286 if ((slave_possible) && (err != 0x81))
3287 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3289 /* is double-select really necessary? */
3290 if (device[1].class != ATA_DEV_NONE)
3291 ap->ops->dev_select(ap, 1);
3292 if (device[0].class != ATA_DEV_NONE)
3293 ap->ops->dev_select(ap, 0);
3295 /* if no devices were detected, disable this port */
3296 if ((device[0].class == ATA_DEV_NONE) &&
3297 (device[1].class == ATA_DEV_NONE))
3300 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3301 /* set up device control for ATA_FLAG_SATA_RESET */
3302 iowrite8(ap->ctl, ioaddr->ctl_addr);
3309 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3310 ata_port_disable(ap);
3316 * sata_link_debounce - debounce SATA phy status
3317 * @link: ATA link to debounce SATA phy status for
3318 * @params: timing parameters { interval, duration, timeout } in msec
3319 * @deadline: deadline jiffies for the operation
3321 * Make sure SStatus of @link reaches stable state, determined by
3322 * holding the same value where DET is not 1 for @duration polled
3323 * every @interval, before @timeout. Timeout constrains the
3324 * beginning of the stable state. Because DET gets stuck at 1 on
3325 * some controllers after hot unplugging, this function waits
3326 * until timeout and then returns 0 if DET is stable at 1.
3328 * @timeout is further limited by @deadline. The sooner of the
3332 * Kernel thread context (may sleep)
3335 * 0 on success, -errno on failure.
3337 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3338 unsigned long deadline)
3340 unsigned long interval_msec = params[0];
3341 unsigned long duration = msecs_to_jiffies(params[1]);
3342 unsigned long last_jiffies, t;
3346 t = jiffies + msecs_to_jiffies(params[2]);
3347 if (time_before(t, deadline))
3350 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3355 last_jiffies = jiffies;
3358 msleep(interval_msec);
3359 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3365 if (cur == 1 && time_before(jiffies, deadline))
3367 if (time_after(jiffies, last_jiffies + duration))
3372 /* unstable, start over */
3374 last_jiffies = jiffies;
3376 /* Check deadline. If debouncing failed, return
3377 * -EPIPE to tell upper layer to lower link speed.
3379 if (time_after(jiffies, deadline))
3385 * sata_link_resume - resume SATA link
3386 * @link: ATA link to resume SATA
3387 * @params: timing parameters { interval, duration, timeout } in msec
3388 * @deadline: deadline jiffies for the operation
3390 * Resume SATA phy @link and debounce it.
3393 * Kernel thread context (may sleep)
3396 * 0 on success, -errno on failure.
3398 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3399 unsigned long deadline)
3404 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3407 scontrol = (scontrol & 0x0f0) | 0x300;
3409 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3412 /* Some PHYs react badly if SStatus is pounded immediately
3413 * after resuming. Delay 200ms before debouncing.
3417 return sata_link_debounce(link, params, deadline);
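/*
 * Illustrative reading of @params for the two helpers above:
 * { 25, 500, 2000 } means poll SStatus every 25 ms, require a stable
 * non-1 DET value for 500 ms, and give up (treating DET stuck at 1 as
 * success) after 2000 ms or at @deadline, whichever comes first.
 */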
3421 * ata_std_prereset - prepare for reset
3422 * @link: ATA link to be reset
3423 * @deadline: deadline jiffies for the operation
3425 * @link is about to be reset. Initialize it. Failure from
3426 * prereset makes libata abort the whole reset sequence and give up
3427 * that port, so prereset should be best-effort. It does its
3428 * best to prepare for reset sequence but if things go wrong, it
3429 * should just whine, not fail.
3432 * Kernel thread context (may sleep)
3435 * 0 on success, -errno otherwise.
3437 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3439 struct ata_port *ap = link->ap;
3440 struct ata_eh_context *ehc = &link->eh_context;
3441 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3444 /* handle link resume */
3445 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3446 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3447 ehc->i.action |= ATA_EH_HARDRESET;
3449 /* Some PMPs don't work with only SRST, force hardreset if PMP
3452 if (ap->flags & ATA_FLAG_PMP)
3453 ehc->i.action |= ATA_EH_HARDRESET;
3455 /* if we're about to do hardreset, nothing more to do */
3456 if (ehc->i.action & ATA_EH_HARDRESET)
3459 /* if SATA, resume link */
3460 if (ap->flags & ATA_FLAG_SATA) {
3461 rc = sata_link_resume(link, timing, deadline);
3462 /* whine about phy resume failure but proceed */
3463 if (rc && rc != -EOPNOTSUPP)
3464 ata_link_printk(link, KERN_WARNING, "failed to resume "
3465 "link for reset (errno=%d)\n", rc);
3468 /* Wait for !BSY if the controller can wait for the first D2H
3469 * Reg FIS and we don't know that no device is attached.
3471 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3472 rc = ata_wait_ready(ap, deadline);
3473 if (rc && rc != -ENODEV) {
3474 ata_link_printk(link, KERN_WARNING, "device not ready "
3475 "(errno=%d), forcing hardreset\n", rc);
3476 ehc->i.action |= ATA_EH_HARDRESET;
3484 * ata_std_softreset - reset host port via ATA SRST
3485 * @link: ATA link to reset
3486 * @classes: resulting classes of attached devices
3487 * @deadline: deadline jiffies for the operation
3489 * Reset host port using ATA SRST.
3492 * Kernel thread context (may sleep)
3495 * 0 on success, -errno otherwise.
3497 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3498 unsigned long deadline)
3500 struct ata_port *ap = link->ap;
3501 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3502 unsigned int devmask = 0;
3508 if (ata_link_offline(link)) {
3509 classes[0] = ATA_DEV_NONE;
3513 /* determine if device 0/1 are present */
3514 if (ata_devchk(ap, 0))
3515 devmask |= (1 << 0);
3516 if (slave_possible && ata_devchk(ap, 1))
3517 devmask |= (1 << 1);
3519 /* select device 0 again */
3520 ap->ops->dev_select(ap, 0);
3522 /* issue bus reset */
3523 DPRINTK("about to softreset, devmask=%x\n", devmask);
3524 rc = ata_bus_softreset(ap, devmask, deadline);
3525 /* if link is occupied, -ENODEV too is an error */
3526 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3527 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3531 /* determine by signature whether we have ATA or ATAPI devices */
3532 classes[0] = ata_dev_try_classify(&link->device[0],
3533 devmask & (1 << 0), &err);
3534 if (slave_possible && err != 0x81)
3535 classes[1] = ata_dev_try_classify(&link->device[1],
3536 devmask & (1 << 1), &err);
3539 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3544 * sata_link_hardreset - reset link via SATA phy reset
3545 * @link: link to reset
3546 * @timing: timing parameters { interval, duration, timeout } in msec
3547 * @deadline: deadline jiffies for the operation
3549 * SATA phy-reset @link using DET bits of SControl register.
3552 * Kernel thread context (may sleep)
3555 * 0 on success, -errno otherwise.
3557 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3558 unsigned long deadline)
3565 if (sata_set_spd_needed(link)) {
3566 /* SATA spec says nothing about how to reconfigure
3567 * spd. To be on the safe side, turn off phy during
3568 * reconfiguration. This works for at least ICH7 AHCI
3571 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3574 scontrol = (scontrol & 0x0f0) | 0x304;
3576 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3582 /* issue phy wake/reset */
3583 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3586 scontrol = (scontrol & 0x0f0) | 0x301;
3588 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3591 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3592 * 10.4.2 says at least 1 ms.
3596 /* bring link back */
3597 rc = sata_link_resume(link, timing, deadline);
3599 DPRINTK("EXIT, rc=%d\n", rc);
3604 * sata_std_hardreset - reset host port via SATA phy reset
3605 * @link: link to reset
3606 * @class: resulting class of attached device
3607 * @deadline: deadline jiffies for the operation
3609 * SATA phy-reset host port using DET bits of SControl register,
3610 * wait for !BSY and classify the attached device.
3613 * Kernel thread context (may sleep)
3616 * 0 on success, -errno otherwise.
3618 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3619 unsigned long deadline)
3621 struct ata_port *ap = link->ap;
3622 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3628 rc = sata_link_hardreset(link, timing, deadline);
3630 ata_link_printk(link, KERN_ERR,
3631 "COMRESET failed (errno=%d)\n", rc);
3635 /* TODO: phy layer with polling, timeouts, etc. */
3636 if (ata_link_offline(link)) {
3637 *class = ATA_DEV_NONE;
3638 DPRINTK("EXIT, link offline\n");
3642 /* wait a while before checking status, see SRST for more info */
3645 /* If PMP is supported, we have to do follow-up SRST. Note
3646 * that some PMPs don't send D2H Reg FIS after hardreset at
3647 * all if the first port is empty. Wait for it just for a
3648 * second and request follow-up SRST.
3650 if (ap->flags & ATA_FLAG_PMP) {
3651 ata_wait_ready(ap, jiffies + HZ);
3655 rc = ata_wait_ready(ap, deadline);
3656 /* link occupied, -ENODEV too is an error */
3658 ata_link_printk(link, KERN_ERR,
3659 "COMRESET failed (errno=%d)\n", rc);
3663 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3665 *class = ata_dev_try_classify(link->device, 1, NULL);
3667 DPRINTK("EXIT, class=%u\n", *class);
3672 * ata_std_postreset - standard postreset callback
3673 * @link: the target ata_link
3674 * @classes: classes of attached devices
3676 * This function is invoked after a successful reset. Note that
3677 * the device might have been reset more than once using
3678 * different reset methods before postreset is invoked.
3681 * Kernel thread context (may sleep)
3683 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3685 struct ata_port *ap = link->ap;
3690 /* print link status */
3691 sata_print_link_status(link);
3694 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3695 sata_scr_write(link, SCR_ERROR, serror);
3697 /* is double-select really necessary? */
3698 if (classes[0] != ATA_DEV_NONE)
3699 ap->ops->dev_select(ap, 1);
3700 if (classes[1] != ATA_DEV_NONE)
3701 ap->ops->dev_select(ap, 0);
3703 /* bail out if no device is present */
3704 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3705 DPRINTK("EXIT, no device\n");
3709 /* set up device control */
3710 if (ap->ioaddr.ctl_addr)
3711 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
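/*
 * Illustrative only: a typical LLDD feeds the standard reset callbacks
 * above into the EH core from its ->error_handler, e.g. (assuming the
 * ata_bmdma_drive_eh() helper and a hypothetical foo_ driver prefix):
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
 *				   sata_std_hardreset, ata_std_postreset);
 *	}
 */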
3717 * ata_dev_same_device - Determine whether new ID matches configured device
3718 * @dev: device to compare against
3719 * @new_class: class of the new device
3720 * @new_id: IDENTIFY page of the new device
3722 * Compare @new_class and @new_id against @dev and determine
3723 * whether @dev is the device indicated by @new_class and
3730 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3732 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3735 const u16 *old_id = dev->id;
3736 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3737 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3739 if (dev->class != new_class) {
3740 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3741 dev->class, new_class);
3745 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3746 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3747 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3748 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3750 if (strcmp(model[0], model[1])) {
3751 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3752 "'%s' != '%s'\n", model[0], model[1]);
3756 if (strcmp(serial[0], serial[1])) {
3757 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3758 "'%s' != '%s'\n", serial[0], serial[1]);
3766 * ata_dev_reread_id - Re-read IDENTIFY data
3767 * @dev: target ATA device
3768 * @readid_flags: read ID flags
3770 * Re-read IDENTIFY page and make sure @dev is still attached to
3774 * Kernel thread context (may sleep)
3777 * 0 on success, negative errno otherwise
3779 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3781 unsigned int class = dev->class;
3782 u16 *id = (void *)dev->link->ap->sector_buf;
3786 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3790 /* is the device still there? */
3791 if (!ata_dev_same_device(dev, class, id))
3794 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3799 * ata_dev_revalidate - Revalidate ATA device
3800 * @dev: device to revalidate
3801 * @new_class: new class code
3802 * @readid_flags: read ID flags
3804 * Re-read IDENTIFY page, make sure @dev is still attached to the
3805 * port and reconfigure it according to the new IDENTIFY page.
3808 * Kernel thread context (may sleep)
3811 * 0 on success, negative errno otherwise
3813 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3814 unsigned int readid_flags)
3816 u64 n_sectors = dev->n_sectors;
3819 if (!ata_dev_enabled(dev))
3822 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3823 if (ata_class_enabled(new_class) &&
3824 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3825 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3826 dev->class, new_class);
3832 rc = ata_dev_reread_id(dev, readid_flags);
3836 /* configure device according to the new ID */
3837 rc = ata_dev_configure(dev);
3841 /* verify n_sectors hasn't changed */
3842 if (dev->class == ATA_DEV_ATA && n_sectors &&
3843 dev->n_sectors != n_sectors) {
3844 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3846 (unsigned long long)n_sectors,
3847 (unsigned long long)dev->n_sectors);
3849 /* restore original n_sectors */
3850 dev->n_sectors = n_sectors;
3859 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3863 struct ata_blacklist_entry {
3864 const char *model_num;
3865 const char *model_rev;
3866 unsigned long horkage;
3869 static const struct ata_blacklist_entry ata_device_blacklist[] = {
3870 /* Devices with DMA related problems under Linux */
3871 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3872 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3873 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3874 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3875 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3876 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3877 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3878 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3879 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3880 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3881 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3882 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3883 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3884 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3885 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3886 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3887 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3888 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3889 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3890 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3891 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3892 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3893 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3894 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3895 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3896 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3897 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3898 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3899 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3900 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3901 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
3902 { "IOMEGA ZIP 250 ATAPI Floppy",
3903 NULL, ATA_HORKAGE_NODMA },
3904 /* Odd clown on sil3726/4726 PMPs */
3905 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3906 ATA_HORKAGE_SKIP_PM },
3908 /* Weird ATAPI devices */
3909 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3911 /* Devices we expect to fail diagnostics */
3913 /* Devices where NCQ should be avoided */
3915 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3916 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3917 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3919 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3920 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3921 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
3922 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
3924 /* Blacklist entries taken from Silicon Image 3124/3132
3925 Windows driver .inf file - also several Linux problem reports */
3926 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3927 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3928 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3929 /* Drives which do spurious command completion */
3930 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
3931 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
3932 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3933 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3934 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
3935 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3936 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3937 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3938 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
3939 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
3940 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
3942 /* devices which puke on READ_NATIVE_MAX */
3943 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3944 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3945 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3946 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3948 /* Devices which report 1 sector over size HPA */
3949 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3950 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
3956 int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3962 * check for trailing wildcard: *\0
3964 p = strchr(patt, wildchar);
3965 if (p && ((*(p + 1)) == 0))
3970 return strncmp(patt, name, len);
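/*
 * Example (illustrative): the "Maxtor *" blacklist entry above has a
 * trailing wildcard, so only the seven characters before '*' are
 * compared; strn_pattern_cmp("Maxtor *", "Maxtor 7V300F0", '*')
 * returns 0 and the device is treated as a match.
 */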
3973 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3975 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3976 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3977 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3979 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3980 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3982 while (ad->model_num) {
3983 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3984 if (ad->model_rev == NULL)
3986 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
3994 static int ata_dma_blacklisted(const struct ata_device *dev)
3996 /* We don't support polling DMA.
3997 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
3998 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4000 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4001 (dev->flags & ATA_DFLAG_CDB_INTR))
4003 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4007 * ata_dev_xfermask - Compute supported xfermask of the given device
4008 * @dev: Device to compute xfermask for
4010 * Compute supported xfermask of @dev and store it in
4011 * dev->*_mask. This function is responsible for applying all
4012 * known limits including host controller limits, device
4018 static void ata_dev_xfermask(struct ata_device *dev)
4020 struct ata_link *link = dev->link;
4021 struct ata_port *ap = link->ap;
4022 struct ata_host *host = ap->host;
4023 unsigned long xfer_mask;
4025 /* controller modes available */
4026 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4027 ap->mwdma_mask, ap->udma_mask);
4029 /* drive modes available */
4030 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4031 dev->mwdma_mask, dev->udma_mask);
4032 xfer_mask &= ata_id_xfermask(dev->id);
4035 * CFA Advanced TrueIDE timings are not allowed on a shared
4038 if (ata_dev_pair(dev)) {
4039 /* No PIO5 or PIO6 */
4040 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4041 /* No MWDMA3 or MWDMA 4 */
4042 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4045 if (ata_dma_blacklisted(dev)) {
4046 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4047 ata_dev_printk(dev, KERN_WARNING,
4048 "device is on DMA blacklist, disabling DMA\n");
4051 if ((host->flags & ATA_HOST_SIMPLEX) &&
4052 host->simplex_claimed && host->simplex_claimed != ap) {
4053 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4054 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4055 "other device, disabling DMA\n");
4058 if (ap->flags & ATA_FLAG_NO_IORDY)
4059 xfer_mask &= ata_pio_mask_no_iordy(dev);
4061 if (ap->ops->mode_filter)
4062 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4064 /* Apply cable rule here. Don't apply it early because when
4065 * we handle hot plug the cable type can itself change.
4066 * Check this last so that we know if the transfer rate was
4067 * solely limited by the cable.
4068 * Unknown or 80-wire cables reported on the host side are checked
4069 * on the drive side as well. Cases where a 40-wire cable is known
4070 * to be safe at 80-wire speeds are not checked here.
4072 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4073 /* UDMA/44 or higher would be available */
4074 if ((ap->cbl == ATA_CBL_PATA40) ||
4075 (ata_drive_40wire(dev->id) &&
4076 (ap->cbl == ATA_CBL_PATA_UNK ||
4077 ap->cbl == ATA_CBL_PATA80))) {
4078 ata_dev_printk(dev, KERN_WARNING,
4079 "limited to UDMA/33 due to 40-wire cable\n");
4080 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4083 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4084 &dev->mwdma_mask, &dev->udma_mask);
4088 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4089 * @dev: Device to which command will be sent
4091 * Issue SET FEATURES - XFER MODE command to device @dev
4095 * PCI/etc. bus probe sem.
4098 * 0 on success, AC_ERR_* mask otherwise.
4101 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4103 struct ata_taskfile tf;
4104 unsigned int err_mask;
4106 /* set up set-features taskfile */
4107 DPRINTK("set features - xfer mode\n");
4109 /* Some controllers and ATAPI devices show flaky interrupt
4110 * behavior after setting xfer mode. Use polling instead.
4112 ata_tf_init(dev, &tf);
4113 tf.command = ATA_CMD_SET_FEATURES;
4114 tf.feature = SETFEATURES_XFER;
4115 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4116 tf.protocol = ATA_PROT_NODATA;
4117 tf.nsect = dev->xfer_mode;
4119 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4121 DPRINTK("EXIT, err_mask=%x\n", err_mask);
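/*
 * Illustrative example: programming UDMA/100 issues SET FEATURES with
 * the SETFEATURES_XFER subcommand and tf.nsect set to the transfer
 * mode value XFER_UDMA_5 (0x45).
 */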
4126 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4127 * @dev: Device to which command will be sent
4128 * @enable: Whether to enable or disable the feature
4130 * Issue SET FEATURES - SATA FEATURES command to device @dev
4131 * with the sector count set to indicate the Asynchronous
4132 * Notification feature
4135 * PCI/etc. bus probe sem.
4138 * 0 on success, AC_ERR_* mask otherwise.
4140 static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4142 struct ata_taskfile tf;
4143 unsigned int err_mask;
4145 /* set up set-features taskfile */
4146 DPRINTK("set features - SATA features\n");
4148 ata_tf_init(dev, &tf);
4149 tf.command = ATA_CMD_SET_FEATURES;
4150 tf.feature = enable;
4151 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4152 tf.protocol = ATA_PROT_NODATA;
4155 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4157 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4162 * ata_dev_init_params - Issue INIT DEV PARAMS command
4163 * @dev: Device to which command will be sent
4164 * @heads: Number of heads (taskfile parameter)
4165 * @sectors: Number of sectors (taskfile parameter)
4168 * Kernel thread context (may sleep)
4171 * 0 on success, AC_ERR_* mask otherwise.
4173 static unsigned int ata_dev_init_params(struct ata_device *dev,
4174 u16 heads, u16 sectors)
4176 struct ata_taskfile tf;
4177 unsigned int err_mask;
4179 /* Number of sectors per track 1-255. Number of heads 1-16 */
4180 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4181 return AC_ERR_INVALID;
4183 /* set up init dev params taskfile */
4184 DPRINTK("init dev params \n");
4186 ata_tf_init(dev, &tf);
4187 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4188 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4189 tf.protocol = ATA_PROT_NODATA;
4191 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4193 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4194 /* A clean abort indicates an original or just out-of-spec drive,
4195 and we should continue, as we issue the setup based on the
4196 drive-reported working geometry. */
4197 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4200 DPRINTK("EXIT, err_mask=%x\n", err_mask);
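/*
 * Illustrative example: a legacy 16-head, 63-sectors-per-track
 * translation is requested with the low nibble of tf.device set to
 * (16 - 1) == 0x0f and the sector count register carrying 63,
 * mirroring the geometry the drive reported in its IDENTIFY data.
 */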
4205 * ata_sg_clean - Unmap DMA memory associated with command
4206 * @qc: Command containing DMA memory to be released
4208 * Unmap all mapped DMA memory associated with this command.
4211 * spin_lock_irqsave(host lock)
4213 void ata_sg_clean(struct ata_queued_cmd *qc)
4215 struct ata_port *ap = qc->ap;
4216 struct scatterlist *sg = qc->__sg;
4217 int dir = qc->dma_dir;
4218 void *pad_buf = NULL;
4220 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4221 WARN_ON(sg == NULL);
4223 if (qc->flags & ATA_QCFLAG_SINGLE)
4224 WARN_ON(qc->n_elem > 1);
4226 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4228 /* if we padded the buffer out to a 32-bit boundary, and the data
4229 * xfer direction is from-device, we must copy from the
4230 * pad buffer back into the supplied buffer
4232 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4233 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4235 if (qc->flags & ATA_QCFLAG_SG) {
4237 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4238 /* restore last sg */
4239 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4241 struct scatterlist *psg = &qc->pad_sgent;
4242 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4243 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4244 kunmap_atomic(addr, KM_IRQ0);
4248 dma_unmap_single(ap->dev,
4249 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4252 sg->length += qc->pad_len;
4254 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4255 pad_buf, qc->pad_len);
4258 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4263 * ata_fill_sg - Fill PCI IDE PRD table
4264 * @qc: Metadata associated with taskfile to be transferred
4266 * Fill PCI IDE PRD (scatter-gather) table with segments
4267 * associated with the current disk command.
4270 * spin_lock_irqsave(host lock)
4273 static void ata_fill_sg(struct ata_queued_cmd *qc)
4275 struct ata_port *ap = qc->ap;
4276 struct scatterlist *sg;
4279 WARN_ON(qc->__sg == NULL);
4280 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4283 ata_for_each_sg(sg, qc) {
4287 /* determine if physical DMA addr spans 64K boundary.
4288 * Note h/w doesn't support 64-bit, so we unconditionally
4289 * truncate dma_addr_t to u32.
4291 addr = (u32) sg_dma_address(sg);
4292 sg_len = sg_dma_len(sg);
4295 offset = addr & 0xffff;
4297 if ((offset + sg_len) > 0x10000)
4298 len = 0x10000 - offset;
4300 ap->prd[idx].addr = cpu_to_le32(addr);
4301 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4302 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4311 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
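/*
 * Worked example (illustrative): a segment at bus address 0x1fff0 of
 * length 0x100 lies 0xfff0 into its 64K region, so the first PRD entry
 * above covers only 0x10 bytes and the remaining 0xf0 bytes are
 * emitted as a following entry starting at 0x20000.
 */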
4315 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4316 * @qc: Metadata associated with taskfile to be transferred
4318 * Fill PCI IDE PRD (scatter-gather) table with segments
4319 * associated with the current disk command. Perform the fill
4320 * so that we avoid writing any 64K-length records for
4321 * controllers that don't follow the spec.
4324 * spin_lock_irqsave(host lock)
4327 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4329 struct ata_port *ap = qc->ap;
4330 struct scatterlist *sg;
4333 WARN_ON(qc->__sg == NULL);
4334 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4337 ata_for_each_sg(sg, qc) {
4339 u32 sg_len, len, blen;
4341 /* determine if physical DMA addr spans 64K boundary.
4342 * Note h/w doesn't support 64-bit, so we unconditionally
4343 * truncate dma_addr_t to u32.
4345 addr = (u32) sg_dma_address(sg);
4346 sg_len = sg_dma_len(sg);
4349 offset = addr & 0xffff;
4351 if ((offset + sg_len) > 0x10000)
4352 len = 0x10000 - offset;
4354 blen = len & 0xffff;
4355 ap->prd[idx].addr = cpu_to_le32(addr);
4357 /* Some PATA chipsets like the CS5530 can't
4358 cope with 0x0000 meaning 64K as the spec says */
4359 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4361 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4363 ap->prd[idx].flags_len = cpu_to_le32(blen);
4364 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4373 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
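/*
 * Worked example (illustrative): a 64K-aligned segment of exactly
 * 0x10000 bytes would need a PRD length field of 0x0000 (meaning 64K
 * per the spec); the code above instead emits two 0x8000-byte entries
 * so that chips like the CS5530 are never handed a zero length.
 */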
4377 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4378 * @qc: Metadata associated with taskfile to check
4380 * Allow low-level driver to filter ATA PACKET commands, returning
4381 * a status indicating whether or not it is OK to use DMA for the
4382 * supplied PACKET command.
4385 * spin_lock_irqsave(host lock)
4387 * RETURNS: 0 when ATAPI DMA can be used
4390 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4392 struct ata_port *ap = qc->ap;
4394 /* Don't allow DMA if the transfer isn't a multiple of 16 bytes;
4395 * quite a few ATAPI devices choke on such DMA requests.
4397 if (unlikely(qc->nbytes & 15))
4400 if (ap->ops->check_atapi_dma)
4401 return ap->ops->check_atapi_dma(qc);
4407 * ata_std_qc_defer - Check whether a qc needs to be deferred
4408 * @qc: ATA command in question
4410 * Non-NCQ commands cannot run with any other command, NCQ or
4411 * not. As the upper layer only knows the queue depth, we are
4412 * responsible for maintaining exclusion. This function checks
4413 * whether a new command @qc can be issued.
4416 * spin_lock_irqsave(host lock)
4419 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4421 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4423 struct ata_link *link = qc->dev->link;
4425 if (qc->tf.protocol == ATA_PROT_NCQ) {
4426 if (!ata_tag_valid(link->active_tag))
4429 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4433 return ATA_DEFER_LINK;
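/*
 * Example (illustrative): while NCQ commands are outstanding
 * (link->sactive non-zero), a newly issued non-NCQ command is deferred
 * with ATA_DEFER_LINK; conversely, an NCQ command is deferred while a
 * non-NCQ command still owns link->active_tag.
 */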
4437 * ata_qc_prep - Prepare taskfile for submission
4438 * @qc: Metadata associated with taskfile to be prepared
4440 * Prepare ATA taskfile for submission.
4443 * spin_lock_irqsave(host lock)
4445 void ata_qc_prep(struct ata_queued_cmd *qc)
4447 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4454 * ata_dumb_qc_prep - Prepare taskfile for submission
4455 * @qc: Metadata associated with taskfile to be prepared
4457 * Prepare ATA taskfile for submission.
4460 * spin_lock_irqsave(host lock)
4462 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4464 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4467 ata_fill_sg_dumb(qc);
4470 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4473 * ata_sg_init_one - Associate command with memory buffer
4474 * @qc: Command to be associated
4475 * @buf: Memory buffer
4476 * @buflen: Length of memory buffer, in bytes.
4478 * Initialize the data-related elements of queued_cmd @qc
4479 * to point to a single memory buffer, @buf of byte length @buflen.
4482 * spin_lock_irqsave(host lock)
4485 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4487 qc->flags |= ATA_QCFLAG_SINGLE;
4489 qc->__sg = &qc->sgent;
4491 qc->orig_n_elem = 1;
4493 qc->nbytes = buflen;
4495 sg_init_one(&qc->sgent, buf, buflen);
4499 * ata_sg_init - Associate command with scatter-gather table.
4500 * @qc: Command to be associated
4501 * @sg: Scatter-gather table.
4502 * @n_elem: Number of elements in s/g table.
4504 * Initialize the data-related elements of queued_cmd @qc
4505 * to point to a scatter-gather table @sg, containing @n_elem
4509 * spin_lock_irqsave(host lock)
4512 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4513 unsigned int n_elem)
4515 qc->flags |= ATA_QCFLAG_SG;
4517 qc->n_elem = n_elem;
4518 qc->orig_n_elem = n_elem;
4522 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4523 * @qc: Command with memory buffer to be mapped.
4525 * DMA-map the memory buffer associated with queued_cmd @qc.
4528 * spin_lock_irqsave(host lock)
4531 * Zero on success, negative on error.
4534 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4536 struct ata_port *ap = qc->ap;
4537 int dir = qc->dma_dir;
4538 struct scatterlist *sg = qc->__sg;
4539 dma_addr_t dma_address;
4542 /* we must lengthen transfers to end on a 32-bit boundary */
4543 qc->pad_len = sg->length & 3;
4545 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4546 struct scatterlist *psg = &qc->pad_sgent;
4548 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4550 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4552 if (qc->tf.flags & ATA_TFLAG_WRITE)
4553 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4556 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4557 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4559 sg->length -= qc->pad_len;
4560 if (sg->length == 0)
4563 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4564 sg->length, qc->pad_len);
4572 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4574 if (dma_mapping_error(dma_address)) {
4576 sg->length += qc->pad_len;
4580 sg_dma_address(sg) = dma_address;
4581 sg_dma_len(sg) = sg->length;
4584 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4585 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4591 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4592 * @qc: Command with scatter-gather table to be mapped.
4594 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4597 * spin_lock_irqsave(host lock)
4600 * Zero on success, negative on error.
4604 static int ata_sg_setup(struct ata_queued_cmd *qc)
4606 struct ata_port *ap = qc->ap;
4607 struct scatterlist *sg = qc->__sg;
4608 struct scatterlist *lsg = &sg[qc->n_elem - 1];
4609 int n_elem, pre_n_elem, dir, trim_sg = 0;
4611 VPRINTK("ENTER, ata%u\n", ap->print_id);
4612 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4614 /* we must lengthen transfers to end on a 32-bit boundary */
4615 qc->pad_len = lsg->length & 3;
4617 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4618 struct scatterlist *psg = &qc->pad_sgent;
4619 unsigned int offset;
4621 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4623 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4626 * psg->page/offset are used to copy to-be-written
4627 * data in this function or read data in ata_sg_clean.
4629 offset = lsg->offset + lsg->length - qc->pad_len;
4630 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4631 psg->offset = offset_in_page(offset);
4633 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4634 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4635 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4636 kunmap_atomic(addr, KM_IRQ0);
4639 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4640 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4642 lsg->length -= qc->pad_len;
4643 if (lsg->length == 0)
4646 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4647 qc->n_elem - 1, lsg->length, qc->pad_len);
4650 pre_n_elem = qc->n_elem;
4651 if (trim_sg && pre_n_elem)
4660 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4662 /* restore last sg */
4663 lsg->length += qc->pad_len;
4667 DPRINTK("%d sg elements mapped\n", n_elem);
4670 qc->n_elem = n_elem;
4676 * swap_buf_le16 - swap halves of 16-bit words in place
4677 * @buf: Buffer to swap
4678 * @buf_words: Number of 16-bit words in buffer.
4680 * Swap halves of 16-bit words if needed to convert from
4681 * little-endian byte order to native cpu byte order, or
4685 * Inherited from caller.
4687 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4692 for (i = 0; i < buf_words; i++)
4693 buf[i] = le16_to_cpu(buf[i]);
4694 #endif /* __BIG_ENDIAN */
4698 * ata_data_xfer - Transfer data by PIO
4699 * @adev: device to target
4701 * @buflen: buffer length
4702 * @write_data: read/write
4704 * Transfer data from/to the device data register by PIO.
4707 * Inherited from caller.
4709 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4710 unsigned int buflen, int write_data)
4712 struct ata_port *ap = adev->link->ap;
4713 unsigned int words = buflen >> 1;
4715 /* Transfer multiple of 2 bytes */
4717 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4719 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4721 /* Transfer trailing 1 byte, if any. */
4722 if (unlikely(buflen & 0x01)) {
4723 u16 align_buf[1] = { 0 };
4724 unsigned char *trailing_buf = buf + buflen - 1;
4727 memcpy(align_buf, trailing_buf, 1);
4728 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4730 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4731 memcpy(trailing_buf, align_buf, 1);
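/*
 * Example (illustrative): a 513-byte PIO transfer moves 256 16-bit
 * words through the data register and then uses align_buf above for
 * the final byte, so the device only ever sees whole-word accesses.
 */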
4737 * ata_data_xfer_noirq - Transfer data by PIO
4738 * @adev: device to target
4740 * @buflen: buffer length
4741 * @write_data: read/write
4743 * Transfer data from/to the device data register by PIO. Do the
4744 * transfer with interrupts disabled.
4747 * Inherited from caller.
4749 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4750 unsigned int buflen, int write_data)
4752 unsigned long flags;
4753 local_irq_save(flags);
4754 ata_data_xfer(adev, buf, buflen, write_data);
4755 local_irq_restore(flags);
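/* Editor's sketch (hypothetical caller, not part of libata): push one
 * 512-byte sector to the device data register by PIO.  The final
 * argument selects the direction: 1 to write, 0 to read.
 */
static void example_write_sector(struct ata_device *adev, u8 *buf)
{
	ata_data_xfer(adev, buf, ATA_SECT_SIZE, 1);
}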
4760 * ata_pio_sector - Transfer a sector of data.
4761 * @qc: Command in progress
4763 * Transfer qc->sect_size bytes of data from/to the ATA device.
4766 * Inherited from caller.
4769 static void ata_pio_sector(struct ata_queued_cmd *qc)
4771 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4772 struct scatterlist *sg = qc->__sg;
4773 struct ata_port *ap = qc->ap;
4775 unsigned int offset;
4778 if (qc->curbytes == qc->nbytes - qc->sect_size)
4779 ap->hsm_task_state = HSM_ST_LAST;
4781 page = sg[qc->cursg].page;
4782 offset = sg[qc->cursg].offset + qc->cursg_ofs;
4784 /* get the current page and offset */
4785 page = nth_page(page, (offset >> PAGE_SHIFT));
4786 offset %= PAGE_SIZE;
4788 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4790 if (PageHighMem(page)) {
4791 unsigned long flags;
4793 /* FIXME: use a bounce buffer */
4794 local_irq_save(flags);
4795 buf = kmap_atomic(page, KM_IRQ0);
4797 /* do the actual data transfer */
4798 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4800 kunmap_atomic(buf, KM_IRQ0);
4801 local_irq_restore(flags);
4803 buf = page_address(page);
4804 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4807 qc->curbytes += qc->sect_size;
4808 qc->cursg_ofs += qc->sect_size;
4810 if (qc->cursg_ofs == sg[qc->cursg].length) {
4817 * ata_pio_sectors - Transfer one or many sectors.
4818 * @qc: Command in progress
4820 * Transfer one or many sectors of data from/to the
4821 * ATA device for the DRQ request.
4824 * Inherited from caller.
4827 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4829 if (is_multi_taskfile(&qc->tf)) {
4830 /* READ/WRITE MULTIPLE */
4833 WARN_ON(qc->dev->multi_count == 0);
4835 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4836 qc->dev->multi_count);
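/* Worked example (editor's note): with 24 sectors remaining and
 * multi_count == 16, nsect = min(24, 16) = 16 for this DRQ block,
 * leaving 8 sectors for the next one.
 */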
4842 ata_altstatus(qc->ap); /* flush */
4846 * atapi_send_cdb - Write CDB bytes to hardware
4847 * @ap: Port to which ATAPI device is attached.
4848 * @qc: Taskfile currently active
4850 * When the device has indicated its readiness to accept
4851 * a CDB, this function is called. Send the CDB.
4857 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4860 DPRINTK("send cdb\n");
4861 WARN_ON(qc->dev->cdb_len < 12);
4863 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4864 ata_altstatus(ap); /* flush */
4866 switch (qc->tf.protocol) {
4867 case ATA_PROT_ATAPI:
4868 ap->hsm_task_state = HSM_ST;
4870 case ATA_PROT_ATAPI_NODATA:
4871 ap->hsm_task_state = HSM_ST_LAST;
4873 case ATA_PROT_ATAPI_DMA:
4874 ap->hsm_task_state = HSM_ST_LAST;
4875 /* initiate bmdma */
4876 ap->ops->bmdma_start(qc);
4882 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4883 * @qc: Command in progress
4884 * @bytes: number of bytes
4886 * Transfer data from/to the ATAPI device.
4889 * Inherited from caller.
4893 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4895 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4896 struct scatterlist *sg = qc->__sg;
4897 struct ata_port *ap = qc->ap;
4900 unsigned int offset, count;
4902 if (qc->curbytes + bytes >= qc->nbytes)
4903 ap->hsm_task_state = HSM_ST_LAST;
4906 if (unlikely(qc->cursg >= qc->n_elem)) {
4908 * The end of qc->sg is reached and the device expects
4909 * more data to transfer. In order not to overrun qc->sg
4910 * and to fulfill the length specified in the byte count register,
4911 * - for the read case, discard trailing data from the device
4912 * - for the write case, pad with zero data to the device
4914 u16 pad_buf[1] = { 0 };
4915 unsigned int words = bytes >> 1;
4918 if (words) /* warning if bytes > 1 */
4919 ata_dev_printk(qc->dev, KERN_WARNING,
4920 "%u bytes trailing data\n", bytes);
4922 for (i = 0; i < words; i++)
4923 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4925 ap->hsm_task_state = HSM_ST_LAST;
4929 sg = &qc->__sg[qc->cursg];
page = sg->page;
4932 offset = sg->offset + qc->cursg_ofs;
4934 /* get the current page and offset */
4935 page = nth_page(page, (offset >> PAGE_SHIFT));
4936 offset %= PAGE_SIZE;
4938 /* don't overrun current sg */
4939 count = min(sg->length - qc->cursg_ofs, bytes);
4941 /* don't cross page boundaries */
4942 count = min(count, (unsigned int)PAGE_SIZE - offset);
4944 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4946 if (PageHighMem(page)) {
4947 unsigned long flags;
4949 /* FIXME: use bounce buffer */
4950 local_irq_save(flags);
4951 buf = kmap_atomic(page, KM_IRQ0);
4953 /* do the actual data transfer */
4954 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4956 kunmap_atomic(buf, KM_IRQ0);
4957 local_irq_restore(flags);
4959 buf = page_address(page);
4960 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4964 qc->curbytes += count;
4965 qc->cursg_ofs += count;
4967 if (qc->cursg_ofs == sg->length) {
4977 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4978 * @qc: Command in progress
4980 * Transfer data from/to the ATAPI device.
4983 * Inherited from caller.
4986 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4988 struct ata_port *ap = qc->ap;
4989 struct ata_device *dev = qc->dev;
4990 unsigned int ireason, bc_lo, bc_hi, bytes;
4991 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4993 /* Abuse qc->result_tf for temp storage of intermediate TF
4994 * here to save some kernel stack usage.
4995 * For normal completion, qc->result_tf is not relevant. For
4996 * error, qc->result_tf is later overwritten by ata_qc_complete().
4997 * So, the correctness of qc->result_tf is not affected.
4999 ap->ops->tf_read(ap, &qc->result_tf);
5000 ireason = qc->result_tf.nsect;
5001 bc_lo = qc->result_tf.lbam;
5002 bc_hi = qc->result_tf.lbah;
5003 bytes = (bc_hi << 8) | bc_lo;
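/* Worked example (editor's note): LBAM = 0x00, LBAH = 0x02 decodes
 * to (0x02 << 8) | 0x00 = 512 bytes for this DRQ block.
 */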
5005 /* shall be cleared to zero, indicating xfer of data */
5006 if (ireason & (1 << 0))
goto err_out;
5009 /* make sure transfer direction matches expected */
5010 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5011 if (do_write != i_write)
goto err_out;
5014 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5016 __atapi_pio_bytes(qc, bytes);
5017 ata_altstatus(ap); /* flush */
return;
err_out:
5022 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
5023 qc->err_mask |= AC_ERR_HSM;
5024 ap->hsm_task_state = HSM_ST_ERR;
5028 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5029 * @ap: the target ata_port
5033 * 1 if ok in workqueue, 0 otherwise.
5036 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5038 if (qc->tf.flags & ATA_TFLAG_POLLING)
5041 if (ap->hsm_task_state == HSM_ST_FIRST) {
5042 if (qc->tf.protocol == ATA_PROT_PIO &&
5043 (qc->tf.flags & ATA_TFLAG_WRITE))
5046 if (is_atapi_taskfile(&qc->tf) &&
5047 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5055 * ata_hsm_qc_complete - finish a qc running on standard HSM
5056 * @qc: Command to complete
5057 * @in_wq: 1 if called from workqueue, 0 otherwise
5059 * Finish @qc which is running on standard HSM.
5062 * If @in_wq is zero, spin_lock_irqsave(host lock).
5063 * Otherwise, none held on entry; this function grabs the host lock.
5065 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5067 struct ata_port *ap = qc->ap;
5068 unsigned long flags;
5070 if (ap->ops->error_handler) {
5072 spin_lock_irqsave(ap->lock, flags);
5074 /* EH might have kicked in while the host lock was released. */
5077 qc = ata_qc_from_tag(ap, qc->tag);
5079 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
5080 ap->ops->irq_on(ap);
5081 ata_qc_complete(qc);
5083 ata_port_freeze(ap);
5086 spin_unlock_irqrestore(ap->lock, flags);
5088 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5089 ata_qc_complete(qc);
5091 ata_port_freeze(ap);
5095 spin_lock_irqsave(ap->lock, flags);
5096 ap->ops->irq_on(ap);
5097 ata_qc_complete(qc);
5098 spin_unlock_irqrestore(ap->lock, flags);
5100 ata_qc_complete(qc);
5105 * ata_hsm_move - move the HSM to the next state.
5106 * @ap: the target ata_port
5108 * @status: current device status
5109 * @in_wq: 1 if called from workqueue, 0 otherwise
5112 * 1 when poll next status needed, 0 otherwise.
5114 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5115 u8 status, int in_wq)
5117 unsigned long flags = 0;
5120 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5122 /* Make sure ata_qc_issue_prot() does not throw things
5123 * like DMA polling into the workqueue. Notice that
5124 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5126 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5129 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5130 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5132 switch (ap->hsm_task_state) {
5134 /* Send first data block or PACKET CDB */
5136 /* If polling, we will stay in the work queue after
5137 * sending the data. Otherwise, interrupt handler
5138 * takes over after sending the data.
5140 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5142 /* check device status */
5143 if (unlikely((status & ATA_DRQ) == 0)) {
5144 /* handle BSY=0, DRQ=0 as error */
5145 if (likely(status & (ATA_ERR | ATA_DF)))
5146 /* device stops HSM for abort/error */
5147 qc->err_mask |= AC_ERR_DEV;
5149 /* HSM violation. Let EH handle this */
5150 qc->err_mask |= AC_ERR_HSM;
5152 ap->hsm_task_state = HSM_ST_ERR;
5156 /* Device should not ask for data transfer (DRQ=1)
5157 * when it finds something wrong.
5158 * We ignore DRQ here and stop the HSM by
5159 * changing hsm_task_state to HSM_ST_ERR, letting
5160 * EH abort the command or reset the device.
5162 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5163 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
5164 "error, dev_stat 0x%X\n", status);
5165 qc->err_mask |= AC_ERR_HSM;
5166 ap->hsm_task_state = HSM_ST_ERR;
5170 /* Send the CDB (atapi) or the first data block (ata pio out).
5171 * During the state transition, interrupt handler shouldn't
5172 * be invoked before the data transfer is complete and
5173 * hsm_task_state is changed. Hence, the following locking.
5176 spin_lock_irqsave(ap->lock, flags);
5178 if (qc->tf.protocol == ATA_PROT_PIO) {
5179 /* PIO data out protocol.
5180 * send first data block.
5183 /* ata_pio_sectors() might change the state
5184 * to HSM_ST_LAST, so the state is changed here
5185 * before calling ata_pio_sectors().
5187 ap->hsm_task_state = HSM_ST;
5188 ata_pio_sectors(qc);
5191 atapi_send_cdb(ap, qc);
5194 spin_unlock_irqrestore(ap->lock, flags);
5196 /* if polling, ata_pio_task() handles the rest.
5197 * otherwise, interrupt handler takes over from here.
5202 /* complete command or read/write the data register */
5203 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5204 /* ATAPI PIO protocol */
5205 if ((status & ATA_DRQ) == 0) {
5206 /* No more data to transfer or device error.
5207 * Device error will be tagged in HSM_ST_LAST.
5209 ap->hsm_task_state = HSM_ST_LAST;
5213 /* Device should not ask for data transfer (DRQ=1)
5214 * when it finds something wrong.
5215 * We ignore DRQ here and stop the HSM by
5216 * changing hsm_task_state to HSM_ST_ERR, letting
5217 * EH abort the command or reset the device.
5219 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5220 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5221 "device error, dev_stat 0x%X\n",
5223 qc->err_mask |= AC_ERR_HSM;
5224 ap->hsm_task_state = HSM_ST_ERR;
5228 atapi_pio_bytes(qc);
5230 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5231 /* bad ireason reported by device */
5235 /* ATA PIO protocol */
5236 if (unlikely((status & ATA_DRQ) == 0)) {
5237 /* handle BSY=0, DRQ=0 as error */
5238 if (likely(status & (ATA_ERR | ATA_DF)))
5239 /* device stops HSM for abort/error */
5240 qc->err_mask |= AC_ERR_DEV;
5242 /* HSM violation. Let EH handle this.
5243 * Phantom devices also trigger this
5244 * condition, so mark the no-device hint.
5246 qc->err_mask |= AC_ERR_HSM |
AC_ERR_NODEV_HINT;
5249 ap->hsm_task_state = HSM_ST_ERR;
5253 /* For PIO reads, some devices may ask for
5254 * data transfer (DRQ=1) along with ERR=1.
5255 * We respect DRQ here and transfer one
5256 * block of junk data before changing the
5257 * hsm_task_state to HSM_ST_ERR.
5259 * For PIO writes, ERR=1 DRQ=1 doesn't make
5260 * sense since the data block has been
5261 * transferred to the device.
5263 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5264 /* data might be corrupted */
5265 qc->err_mask |= AC_ERR_DEV;
5267 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5268 ata_pio_sectors(qc);
5269 status = ata_wait_idle(ap);
5272 if (status & (ATA_BUSY | ATA_DRQ))
5273 qc->err_mask |= AC_ERR_HSM;
5275 /* ata_pio_sectors() might change the
5276 * state to HSM_ST_LAST, so the state
5277 * is changed after ata_pio_sectors().
5279 ap->hsm_task_state = HSM_ST_ERR;
5283 ata_pio_sectors(qc);
5285 if (ap->hsm_task_state == HSM_ST_LAST &&
5286 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5288 status = ata_wait_idle(ap);
5297 if (unlikely(!ata_ok(status))) {
5298 qc->err_mask |= __ac_err_mask(status);
5299 ap->hsm_task_state = HSM_ST_ERR;
5303 /* no more data to transfer */
5304 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5305 ap->print_id, qc->dev->devno, status);
5307 WARN_ON(qc->err_mask);
5309 ap->hsm_task_state = HSM_ST_IDLE;
5311 /* complete taskfile transaction */
5312 ata_hsm_qc_complete(qc, in_wq);
5318 /* make sure qc->err_mask is available to
5319 * know what's wrong and recover
5321 WARN_ON(qc->err_mask == 0);
5323 ap->hsm_task_state = HSM_ST_IDLE;
5325 /* complete taskfile transaction */
5326 ata_hsm_qc_complete(qc, in_wq);
5338 static void ata_pio_task(struct work_struct *work)
5340 struct ata_port *ap =
5341 container_of(work, struct ata_port, port_task.work);
5342 struct ata_queued_cmd *qc = ap->port_task_data;
5347 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5350 * This is purely heuristic. This is a fast path.
5351 * Sometimes when we enter, BSY will be cleared in
5352 * a chk-status or two. If not, the drive is probably seeking
5353 * or something. Snooze for a couple of msecs, then
5354 * chk-status again. If still busy, queue delayed work.
5356 status = ata_busy_wait(ap, ATA_BUSY, 5);
5357 if (status & ATA_BUSY) {
msleep(2);
5359 status = ata_busy_wait(ap, ATA_BUSY, 10);
5360 if (status & ATA_BUSY) {
5361 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5367 poll_next = ata_hsm_move(ap, qc, status, 1);
5369 /* another command or interrupt handler
5370 * may be running at this point.
5377 * ata_qc_new - Request an available ATA command, for queueing
5378 * @ap: Port associated with device @dev
5379 * @dev: Device from which we request an available command structure
5385 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5387 struct ata_queued_cmd *qc = NULL;
5390 /* no command while frozen */
5391 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5394 /* the last tag is reserved for internal command. */
5395 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5396 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5397 qc = __ata_qc_from_tag(ap, i);
5408 * ata_qc_new_init - Request an available ATA command, and initialize it
5409 * @dev: Device from which we request an available command structure
5415 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5417 struct ata_port *ap = dev->link->ap;
5418 struct ata_queued_cmd *qc;
5420 qc = ata_qc_new(ap);
5433 * ata_qc_free - free unused ata_queued_cmd
5434 * @qc: Command to complete
5436 * Designed to free an unused ata_queued_cmd object
5437 * in case something prevents it from being used.
5440 * spin_lock_irqsave(host lock)
5442 void ata_qc_free(struct ata_queued_cmd *qc)
5444 struct ata_port *ap = qc->ap;
5447 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5451 if (likely(ata_tag_valid(tag))) {
5452 qc->tag = ATA_TAG_POISON;
5453 clear_bit(tag, &ap->qc_allocated);
5457 void __ata_qc_complete(struct ata_queued_cmd *qc)
5459 struct ata_port *ap = qc->ap;
5460 struct ata_link *link = qc->dev->link;
5462 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5463 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5465 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5468 /* command should be marked inactive atomically with qc completion */
5469 if (qc->tf.protocol == ATA_PROT_NCQ) {
5470 link->sactive &= ~(1 << qc->tag);
5472 ap->nr_active_links--;
5474 link->active_tag = ATA_TAG_POISON;
5475 ap->nr_active_links--;
5478 /* clear exclusive status */
5479 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5480 ap->excl_link == link))
5481 ap->excl_link = NULL;
5483 /* atapi: mark qc as inactive to prevent the interrupt handler
5484 * from completing the command twice later, before the error handler
5485 * is called. (when rc != 0 and atapi request sense is needed)
5487 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5488 ap->qc_active &= ~(1 << qc->tag);
5490 /* call completion callback */
5491 qc->complete_fn(qc);
5494 static void fill_result_tf(struct ata_queued_cmd *qc)
5496 struct ata_port *ap = qc->ap;
5498 qc->result_tf.flags = qc->tf.flags;
5499 ap->ops->tf_read(ap, &qc->result_tf);
5503 * ata_qc_complete - Complete an active ATA command
5504 * @qc: Command to complete
5507 * Indicate to the mid and upper layers that an ATA
5508 * command has completed, with either an ok or not-ok status.
5511 * spin_lock_irqsave(host lock)
5513 void ata_qc_complete(struct ata_queued_cmd *qc)
5515 struct ata_port *ap = qc->ap;
5517 /* XXX: New EH and old EH use different mechanisms to
5518 * synchronize EH with regular execution path.
5520 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5521 * Normal execution path is responsible for not accessing a
5522 * failed qc. libata core enforces the rule by returning NULL
5523 * from ata_qc_from_tag() for failed qcs.
5525 * Old EH depends on ata_qc_complete() nullifying completion
5526 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5527 * not synchronize with interrupt handler. Only PIO task is taken care of.
5530 if (ap->ops->error_handler) {
5531 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5533 if (unlikely(qc->err_mask))
5534 qc->flags |= ATA_QCFLAG_FAILED;
5536 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5537 if (!ata_tag_internal(qc->tag)) {
5538 /* always fill result TF for failed qc */
5540 ata_qc_schedule_eh(qc);
5545 /* read result TF if requested */
5546 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5549 __ata_qc_complete(qc);
5551 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5554 /* read result TF if failed or requested */
5555 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5558 __ata_qc_complete(qc);
5563 * ata_qc_complete_multiple - Complete multiple qcs successfully
5564 * @ap: port in question
5565 * @qc_active: new qc_active mask
5566 * @finish_qc: LLDD callback invoked before completing a qc
5568 * Complete in-flight commands. This function is meant to be
5569 * called from the low-level driver's interrupt routine to complete
5570 * requests normally. ap->qc_active and @qc_active are compared
5571 * and commands are completed accordingly.
5574 * spin_lock_irqsave(host lock)
5577 * Number of completed commands on success, -errno otherwise.
5579 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5580 void (*finish_qc)(struct ata_queued_cmd *))
5586 done_mask = ap->qc_active ^ qc_active;
5588 if (unlikely(done_mask & qc_active)) {
5589 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5590 "(%08x->%08x)\n", ap->qc_active, qc_active);
5594 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5595 struct ata_queued_cmd *qc;
5597 if (!(done_mask & (1 << i)))
5600 if ((qc = ata_qc_from_tag(ap, i))) {
5603 ata_qc_complete(qc);
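/* Editor's sketch (hypothetical NCQ LLD, not part of libata): an
 * interrupt handler reads the controller's current active-tag mask and
 * hands it to ata_qc_complete_multiple(); every tag that dropped out
 * of the mask is completed.  No per-qc fixup is needed here, so
 * finish_qc is NULL.
 */
static void example_ncq_intr(struct ata_port *ap, u32 hw_active_tags)
{
	ata_qc_complete_multiple(ap, hw_active_tags, NULL);
}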
5611 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5613 struct ata_port *ap = qc->ap;
5615 switch (qc->tf.protocol) {
5618 case ATA_PROT_ATAPI_DMA:
5621 case ATA_PROT_ATAPI:
5623 if (ap->flags & ATA_FLAG_PIO_DMA)
5636 * ata_qc_issue - issue taskfile to device
5637 * @qc: command to issue to device
5639 * Prepare an ATA command for submission to the device.
5640 * This includes mapping the data into a DMA-able
5641 * area, filling in the S/G table, and finally
5642 * writing the taskfile to hardware, starting the command.
5645 * spin_lock_irqsave(host lock)
5647 void ata_qc_issue(struct ata_queued_cmd *qc)
5649 struct ata_port *ap = qc->ap;
5650 struct ata_link *link = qc->dev->link;
5652 /* Make sure only one non-NCQ command is outstanding. The
5653 * check is skipped for old EH because it reuses active qc to
5654 * request ATAPI sense.
5656 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5658 if (qc->tf.protocol == ATA_PROT_NCQ) {
5659 WARN_ON(link->sactive & (1 << qc->tag));
5662 ap->nr_active_links++;
5663 link->sactive |= 1 << qc->tag;
5665 WARN_ON(link->sactive);
5667 ap->nr_active_links++;
5668 link->active_tag = qc->tag;
5671 qc->flags |= ATA_QCFLAG_ACTIVE;
5672 ap->qc_active |= 1 << qc->tag;
5674 if (ata_should_dma_map(qc)) {
5675 if (qc->flags & ATA_QCFLAG_SG) {
5676 if (ata_sg_setup(qc))
5678 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5679 if (ata_sg_setup_one(qc))
5683 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5686 ap->ops->qc_prep(qc);
5688 qc->err_mask |= ap->ops->qc_issue(qc);
5689 if (unlikely(qc->err_mask))
5694 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5695 qc->err_mask |= AC_ERR_SYSTEM;
5697 ata_qc_complete(qc);
5701 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5702 * @qc: command to issue to device
5704 * Using various libata functions and hooks, this function
5705 * starts an ATA command. ATA commands are grouped into
5706 * classes called "protocols", and issuing each type of protocol
5707 * is slightly different.
5709 * May be used as the qc_issue() entry in ata_port_operations.
5712 * spin_lock_irqsave(host lock)
5715 * Zero on success, AC_ERR_* mask on failure
5718 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5720 struct ata_port *ap = qc->ap;
5722 /* Use polling pio if the LLD doesn't handle
5723 * interrupt driven pio and atapi CDB interrupt.
5725 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5726 switch (qc->tf.protocol) {
5728 case ATA_PROT_NODATA:
5729 case ATA_PROT_ATAPI:
5730 case ATA_PROT_ATAPI_NODATA:
5731 qc->tf.flags |= ATA_TFLAG_POLLING;
5733 case ATA_PROT_ATAPI_DMA:
5734 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5735 /* see ata_dma_blacklisted() */
5743 /* select the device */
5744 ata_dev_select(ap, qc->dev->devno, 1, 0);
5746 /* start the command */
5747 switch (qc->tf.protocol) {
5748 case ATA_PROT_NODATA:
5749 if (qc->tf.flags & ATA_TFLAG_POLLING)
5750 ata_qc_set_polling(qc);
5752 ata_tf_to_host(ap, &qc->tf);
5753 ap->hsm_task_state = HSM_ST_LAST;
5755 if (qc->tf.flags & ATA_TFLAG_POLLING)
5756 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5761 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5763 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5764 ap->ops->bmdma_setup(qc); /* set up bmdma */
5765 ap->ops->bmdma_start(qc); /* initiate bmdma */
5766 ap->hsm_task_state = HSM_ST_LAST;
5770 if (qc->tf.flags & ATA_TFLAG_POLLING)
5771 ata_qc_set_polling(qc);
5773 ata_tf_to_host(ap, &qc->tf);
5775 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5776 /* PIO data out protocol */
5777 ap->hsm_task_state = HSM_ST_FIRST;
5778 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5780 /* always send first data block using
5781 * the ata_pio_task() codepath.
5784 /* PIO data in protocol */
5785 ap->hsm_task_state = HSM_ST;
5787 if (qc->tf.flags & ATA_TFLAG_POLLING)
5788 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5790 /* if polling, ata_pio_task() handles the rest.
5791 * otherwise, interrupt handler takes over from here.
5797 case ATA_PROT_ATAPI:
5798 case ATA_PROT_ATAPI_NODATA:
5799 if (qc->tf.flags & ATA_TFLAG_POLLING)
5800 ata_qc_set_polling(qc);
5802 ata_tf_to_host(ap, &qc->tf);
5804 ap->hsm_task_state = HSM_ST_FIRST;
5806 /* send cdb by polling if no cdb interrupt */
5807 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5808 (qc->tf.flags & ATA_TFLAG_POLLING))
5809 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5812 case ATA_PROT_ATAPI_DMA:
5813 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5815 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5816 ap->ops->bmdma_setup(qc); /* set up bmdma */
5817 ap->hsm_task_state = HSM_ST_FIRST;
5819 /* send cdb by polling if no cdb interrupt */
5820 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5821 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5826 return AC_ERR_SYSTEM;
5833 * ata_host_intr - Handle host interrupt for given (port, task)
5834 * @ap: Port on which interrupt arrived (possibly...)
5835 * @qc: Taskfile currently active in engine
5837 * Handle host interrupt for given queued command. Currently,
5838 * only DMA interrupts are handled. All other commands are
5839 * handled via polling with interrupts disabled (nIEN bit).
5842 * spin_lock_irqsave(host lock)
5845 * One if interrupt was handled, zero if not (shared irq).
5848 inline unsigned int ata_host_intr (struct ata_port *ap,
5849 struct ata_queued_cmd *qc)
5851 struct ata_eh_info *ehi = &ap->link.eh_info;
5852 u8 status, host_stat = 0;
5854 VPRINTK("ata%u: protocol %d task_state %d\n",
5855 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5857 /* Check whether we are expecting interrupt in this state */
5858 switch (ap->hsm_task_state) {
5860 /* Some pre-ATAPI-4 devices assert INTRQ
5861 * in this state when ready to receive the CDB.
5864 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here;
5865 * the flag is set only for ATAPI devices, so there is
5866 * no need to check is_atapi_taskfile(&qc->tf) again.
5868 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5872 if (qc->tf.protocol == ATA_PROT_DMA ||
5873 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5874 /* check status of DMA engine */
5875 host_stat = ap->ops->bmdma_status(ap);
5876 VPRINTK("ata%u: host_stat 0x%X\n",
5877 ap->print_id, host_stat);
5879 /* if it's not our irq... */
5880 if (!(host_stat & ATA_DMA_INTR))
5883 /* before we do anything else, clear DMA-Start bit */
5884 ap->ops->bmdma_stop(qc);
5886 if (unlikely(host_stat & ATA_DMA_ERR)) {
5887 /* error when transferring data to/from memory */
5888 qc->err_mask |= AC_ERR_HOST_BUS;
5889 ap->hsm_task_state = HSM_ST_ERR;
5899 /* check altstatus */
5900 status = ata_altstatus(ap);
5901 if (status & ATA_BUSY)
5904 /* check main status, clearing INTRQ */
5905 status = ata_chk_status(ap);
5906 if (unlikely(status & ATA_BUSY))
5909 /* ack bmdma irq events */
5910 ap->ops->irq_clear(ap);
5912 ata_hsm_move(ap, qc, status, 0);
5914 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5915 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5916 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5918 return 1; /* irq handled */
5921 ap->stats.idle_irq++;
5924 if ((ap->stats.idle_irq % 1000) == 0) {
5926 ap->ops->irq_clear(ap);
5927 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5931 return 0; /* irq not handled */
5935 * ata_interrupt - Default ATA host interrupt handler
5936 * @irq: irq line (unused)
5937 * @dev_instance: pointer to our ata_host information structure
5939 * Default interrupt handler for PCI IDE devices. Calls
5940 * ata_host_intr() for each port that is not disabled.
5943 * Obtains host lock during operation.
5946 * IRQ_NONE or IRQ_HANDLED.
5949 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5951 struct ata_host *host = dev_instance;
5953 unsigned int handled = 0;
5954 unsigned long flags;
5956 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5957 spin_lock_irqsave(&host->lock, flags);
5959 for (i = 0; i < host->n_ports; i++) {
5960 struct ata_port *ap;
5962 ap = host->ports[i];
5964 !(ap->flags & ATA_FLAG_DISABLED)) {
5965 struct ata_queued_cmd *qc;
5967 qc = ata_qc_from_tag(ap, ap->link.active_tag);
5968 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5969 (qc->flags & ATA_QCFLAG_ACTIVE))
5970 handled |= ata_host_intr(ap, qc);
5974 spin_unlock_irqrestore(&host->lock, flags);
5976 return IRQ_RETVAL(handled);
5980 * sata_scr_valid - test whether SCRs are accessible
5981 * @link: ATA link to test SCR accessibility for
5983 * Test whether SCRs are accessible for @link.
5989 * 1 if SCRs are accessible, 0 otherwise.
5991 int sata_scr_valid(struct ata_link *link)
5993 struct ata_port *ap = link->ap;
5995 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5999 * sata_scr_read - read SCR register of the specified port
6000 * @link: ATA link to read SCR for
6002 * @val: Place to store read value
6004 * Read SCR register @reg of @link into *@val. This function is
6005 * guaranteed to succeed if @link is ap->link, the cable type of
6006 * the port is SATA and the port implements ->scr_read.
6009 * None if @link is ap->link. Kernel thread context otherwise.
6012 * 0 on success, negative errno on failure.
6014 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6016 if (ata_is_host_link(link)) {
6017 struct ata_port *ap = link->ap;
6019 if (sata_scr_valid(link))
6020 return ap->ops->scr_read(ap, reg, val);
6024 return sata_pmp_scr_read(link, reg, val);
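/* Editor's sketch (not part of libata): decode the DET field of
 * SStatus the same way ata_link_online() below does.  Returns the raw
 * DET value, or -1 if the SCRs are not accessible.
 */
static int example_link_det(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return -1;
	return sstatus & 0xf;	/* 0x3 == device present, phy online */
}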
6028 * sata_scr_write - write SCR register of the specified port
6029 * @link: ATA link to write SCR for
6030 * @reg: SCR to write
6031 * @val: value to write
6033 * Write @val to SCR register @reg of @link. This function is
6034 * guaranteed to succeed if @link is ap->link, the cable type of
6035 * the port is SATA and the port implements ->scr_write.
6038 * None if @link is ap->link. Kernel thread context otherwise.
6041 * 0 on success, negative errno on failure.
6043 int sata_scr_write(struct ata_link *link, int reg, u32 val)
6045 if (ata_is_host_link(link)) {
6046 struct ata_port *ap = link->ap;
6048 if (sata_scr_valid(link))
6049 return ap->ops->scr_write(ap, reg, val);
6053 return sata_pmp_scr_write(link, reg, val);
6057 * sata_scr_write_flush - write SCR register of the specified port and flush
6058 * @link: ATA link to write SCR for
6059 * @reg: SCR to write
6060 * @val: value to write
6062 * This function is identical to sata_scr_write() except that this
6063 * function performs a flush after writing to the register.
6066 * None if @link is ap->link. Kernel thread context otherwise.
6069 * 0 on success, negative errno on failure.
6071 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6073 if (ata_is_host_link(link)) {
6074 struct ata_port *ap = link->ap;
6077 if (sata_scr_valid(link)) {
6078 rc = ap->ops->scr_write(ap, reg, val);
6080 rc = ap->ops->scr_read(ap, reg, &val);
6086 return sata_pmp_scr_write(link, reg, val);
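/* Editor's sketch (hypothetical helper, not part of libata): cap a
 * link at 1.5 Gbps by rewriting the SPD field (bits 7:4) of SControl
 * and flushing the write so it reaches the device before we proceed.
 */
static int example_limit_to_gen1(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;
	scontrol = (scontrol & ~0xf0) | 0x10;	/* SPD = 1: 1.5 Gbps */
	return sata_scr_write_flush(link, SCR_CONTROL, scontrol);
}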
6090 * ata_link_online - test whether the given link is online
6091 * @link: ATA link to test
6093 * Test whether @link is online. Note that this function returns
6094 * 0 if the online status of @link cannot be obtained, so
6095 * ata_link_online(link) != !ata_link_offline(link).
6101 * 1 if the port online status is available and online.
6103 int ata_link_online(struct ata_link *link)
6107 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6108 (sstatus & 0xf) == 0x3)
6114 * ata_link_offline - test whether the given link is offline
6115 * @link: ATA link to test
6117 * Test whether @link is offline. Note that this function
6118 * returns 0 if the offline status of @link cannot be obtained, so
6119 * ata_link_online(link) != !ata_link_offline(link).
6125 * 1 if the port offline status is available and offline.
6127 int ata_link_offline(struct ata_link *link)
6131 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6132 (sstatus & 0xf) != 0x3)
6137 int ata_flush_cache(struct ata_device *dev)
6139 unsigned int err_mask;
6142 if (!ata_try_flush_cache(dev))
6145 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6146 cmd = ATA_CMD_FLUSH_EXT;
6148 cmd = ATA_CMD_FLUSH;
6150 /* This is wrong. On a failed flush we get back the LBA of the lost
6151 sector and we should (assuming it wasn't aborted as unknown) issue
6152 a further flush command to continue the writeback until it does not error. */
6154 err_mask = ata_do_simple_cmd(dev, cmd);
6156 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6164 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6165 unsigned int action, unsigned int ehi_flags,
6168 unsigned long flags;
6171 for (i = 0; i < host->n_ports; i++) {
6172 struct ata_port *ap = host->ports[i];
6173 struct ata_link *link;
6175 /* Previous resume operation might still be in
6176 * progress. Wait for PM_PENDING to clear.
6178 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6179 ata_port_wait_eh(ap);
6180 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6183 /* request PM ops to EH */
6184 spin_lock_irqsave(ap->lock, flags);
6189 ap->pm_result = &rc;
6192 ap->pflags |= ATA_PFLAG_PM_PENDING;
6193 __ata_port_for_each_link(link, ap) {
6194 link->eh_info.action |= action;
6195 link->eh_info.flags |= ehi_flags;
6198 ata_port_schedule_eh(ap);
6200 spin_unlock_irqrestore(ap->lock, flags);
6202 /* wait and check result */
6204 ata_port_wait_eh(ap);
6205 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6215 * ata_host_suspend - suspend host
6216 * @host: host to suspend
6219 * Suspend @host. Actual operation is performed by EH. This
6220 * function requests EH to perform PM operations and waits for EH
6224 * Kernel thread context (may sleep).
6227 * 0 on success, -errno on failure.
6229 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6233 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
if (rc == 0)
6235 host->dev->power.power_state = mesg;
return rc;
6240 * ata_host_resume - resume host
6241 * @host: host to resume
6243 * Resume @host. Actual operation is performed by EH. This
6244 * function requests EH to perform PM operations and returns.
6245 * Note that all resume operations are performed in parallel.
6248 * Kernel thread context (may sleep).
6250 void ata_host_resume(struct ata_host *host)
6252 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6253 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6254 host->dev->power.power_state = PMSG_ON;
6259 * ata_port_start - Set port up for dma.
6260 * @ap: Port to initialize
6262 * Called just after data structures for each port are
6263 * initialized. Allocates space for the PRD table.
6265 * May be used as the port_start() entry in ata_port_operations.
6268 * Inherited from caller.
6270 int ata_port_start(struct ata_port *ap)
6272 struct device *dev = ap->dev;
6275 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6280 rc = ata_pad_alloc(ap, dev);
6284 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6285 (unsigned long long)ap->prd_dma);
6290 * ata_dev_init - Initialize an ata_device structure
6291 * @dev: Device structure to initialize
6293 * Initialize @dev in preparation for probing.
6296 * Inherited from caller.
6298 void ata_dev_init(struct ata_device *dev)
6300 struct ata_link *link = dev->link;
6301 struct ata_port *ap = link->ap;
6302 unsigned long flags;
6304 /* SATA spd limit is bound to the first device */
6305 link->sata_spd_limit = link->hw_sata_spd_limit;
6308 /* High bits of dev->flags are used to record warm plug
6309 * requests which occur asynchronously. Synchronize using
6312 spin_lock_irqsave(ap->lock, flags);
6313 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6315 spin_unlock_irqrestore(ap->lock, flags);
6317 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6318 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6319 dev->pio_mask = UINT_MAX;
6320 dev->mwdma_mask = UINT_MAX;
6321 dev->udma_mask = UINT_MAX;
6325 * ata_link_init - Initialize an ata_link structure
6326 * @ap: ATA port link is attached to
6327 * @link: Link structure to initialize
6328 * @pmp: Port multiplier port number
6333 * Kernel thread context (may sleep)
6335 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6339 /* clear everything except for devices */
6340 memset(link, 0, offsetof(struct ata_link, device[0]));
6344 link->active_tag = ATA_TAG_POISON;
6345 link->hw_sata_spd_limit = UINT_MAX;
6347 /* can't use iterator, ap isn't initialized yet */
6348 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6349 struct ata_device *dev = &link->device[i];
6352 dev->devno = dev - link->device;
6358 * sata_link_init_spd - Initialize link->sata_spd_limit
6359 * @link: Link to configure sata_spd_limit for
6361 * Initialize @link->[hw_]sata_spd_limit to the currently configured value.
6365 * Kernel thread context (may sleep).
6368 * 0 on success, -errno on failure.
6370 int sata_link_init_spd(struct ata_link *link)
6375 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6379 spd = (scontrol >> 4) & 0xf;
if (spd)
6381 link->hw_sata_spd_limit &= (1 << spd) - 1;
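/* Worked example (editor's note): spd == 2 (link capped at 3.0 Gbps)
 * gives a mask of (1 << 2) - 1 = 0x3, permitting both 1.5 and
 * 3.0 Gbps.
 */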
6383 link->sata_spd_limit = link->hw_sata_spd_limit;
6389 * ata_port_alloc - allocate and initialize basic ATA port resources
6390 * @host: ATA host this allocated port belongs to
6392 * Allocate and initialize basic ATA port resources.
6395 * Allocate ATA port on success, NULL on failure.
6398 * Inherited from calling layer (may sleep).
6400 struct ata_port *ata_port_alloc(struct ata_host *host)
6402 struct ata_port *ap;
6406 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6410 ap->pflags |= ATA_PFLAG_INITIALIZING;
6411 ap->lock = &host->lock;
6412 ap->flags = ATA_FLAG_DISABLED;
6414 ap->ctl = ATA_DEVCTL_OBS;
6416 ap->dev = host->dev;
6417 ap->last_ctl = 0xFF;
6419 #if defined(ATA_VERBOSE_DEBUG)
6420 /* turn on all debugging levels */
6421 ap->msg_enable = 0x00FF;
6422 #elif defined(ATA_DEBUG)
6423 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6425 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6428 INIT_DELAYED_WORK(&ap->port_task, NULL);
6429 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6430 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6431 INIT_LIST_HEAD(&ap->eh_done_q);
6432 init_waitqueue_head(&ap->eh_wait_q);
6433 init_timer_deferrable(&ap->fastdrain_timer);
6434 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6435 ap->fastdrain_timer.data = (unsigned long)ap;
6437 ap->cbl = ATA_CBL_NONE;
6439 ata_link_init(ap, &ap->link, 0);
6442 ap->stats.unhandled_irq = 1;
6443 ap->stats.idle_irq = 1;
6448 static void ata_host_release(struct device *gendev, void *res)
6450 struct ata_host *host = dev_get_drvdata(gendev);
6453 for (i = 0; i < host->n_ports; i++) {
6454 struct ata_port *ap = host->ports[i];
6459 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6460 ap->ops->port_stop(ap);
6463 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6464 host->ops->host_stop(host);
6466 for (i = 0; i < host->n_ports; i++) {
6467 struct ata_port *ap = host->ports[i];
6473 scsi_host_put(ap->scsi_host);
6475 kfree(ap->pmp_link);
6477 host->ports[i] = NULL;
6480 dev_set_drvdata(gendev, NULL);
6484 * ata_host_alloc - allocate and init basic ATA host resources
6485 * @dev: generic device this host is associated with
6486 * @max_ports: maximum number of ATA ports associated with this host
6488 * Allocate and initialize basic ATA host resources. LLD calls
6489 * this function to allocate a host, initializes it fully and
6490 * attaches it using ata_host_register().
6492 * @max_ports ports are allocated and host->n_ports is
6493 * initialized to @max_ports. The caller is allowed to decrease
6494 * host->n_ports before calling ata_host_register(). The unused
6495 * ports will be automatically freed on registration.
6498 * Allocate ATA host on success, NULL on failure.
6501 * Inherited from calling layer (may sleep).
6503 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6505 struct ata_host *host;
6511 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6514 /* alloc a container for our list of ATA ports (buses) */
6515 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6517 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6521 devres_add(dev, host);
6522 dev_set_drvdata(dev, host);
6524 spin_lock_init(&host->lock);
6526 host->n_ports = max_ports;
6528 /* allocate ports bound to this host */
6529 for (i = 0; i < max_ports; i++) {
6530 struct ata_port *ap;
6532 ap = ata_port_alloc(host);
6537 host->ports[i] = ap;
6540 devres_remove_group(dev, NULL);
6544 devres_release_group(dev, NULL);
6549 * ata_host_alloc_pinfo - alloc host and init with port_info array
6550 * @dev: generic device this host is associated with
6551 * @ppi: array of ATA port_info to initialize host with
6552 * @n_ports: number of ATA ports attached to this host
6554 * Allocate ATA host and initialize with info from @ppi. If NULL
6555 * terminated, @ppi may contain fewer entries than @n_ports. The
6556 * last entry will be used for the remaining ports.
6559 * Allocate ATA host on success, NULL on failure.
6562 * Inherited from calling layer (may sleep).
6564 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6565 const struct ata_port_info * const * ppi,
6568 const struct ata_port_info *pi;
6569 struct ata_host *host;
6572 host = ata_host_alloc(dev, n_ports);
6576 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6577 struct ata_port *ap = host->ports[i];
6582 ap->pio_mask = pi->pio_mask;
6583 ap->mwdma_mask = pi->mwdma_mask;
6584 ap->udma_mask = pi->udma_mask;
6585 ap->flags |= pi->flags;
6586 ap->link.flags |= pi->link_flags;
6587 ap->ops = pi->port_ops;
6589 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6590 host->ops = pi->port_ops;
6591 if (!host->private_data && pi->private_data)
6592 host->private_data = pi->private_data;
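/* Editor's sketch (hypothetical PCI LLD probe, not part of libata):
 * build a host from a single port_info template, then start it, grab
 * the IRQ and register it in one go with ata_host_activate() below.
 * my_port_info and sht are assumed to be defined by the driver.
 */
static int example_lld_probe(struct pci_dev *pdev,
			     const struct ata_port_info *my_port_info,
			     struct scsi_host_template *sht)
{
	const struct ata_port_info *ppi[] = { my_port_info, NULL };
	struct ata_host *host;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, sht);
}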
6599 * ata_host_start - start and freeze ports of an ATA host
6600 * @host: ATA host to start ports for
6602 * Start and then freeze ports of @host. Started status is
6603 * recorded in host->flags, so this function can be called
6604 * multiple times. Ports are guaranteed to get started only
6605 * once. If host->ops isn't initialized yet, it's set to the
6606 * first non-dummy port ops.
6609 * Inherited from calling layer (may sleep).
6612 * 0 if all ports are started successfully, -errno otherwise.
6614 int ata_host_start(struct ata_host *host)
6618 if (host->flags & ATA_HOST_STARTED)
6621 for (i = 0; i < host->n_ports; i++) {
6622 struct ata_port *ap = host->ports[i];
6624 if (!host->ops && !ata_port_is_dummy(ap))
6625 host->ops = ap->ops;
6627 if (ap->ops->port_start) {
6628 rc = ap->ops->port_start(ap);
6630 ata_port_printk(ap, KERN_ERR, "failed to "
6631 "start port (errno=%d)\n", rc);
6636 ata_eh_freeze_port(ap);
6639 host->flags |= ATA_HOST_STARTED;
6644 struct ata_port *ap = host->ports[i];
6646 if (ap->ops->port_stop)
6647 ap->ops->port_stop(ap);
6653 * ata_host_init - Initialize a host struct
6654 * @host: host to initialize
6655 * @dev: device host is attached to
6656 * @flags: host flags
6660 * PCI/etc. bus probe sem.
6663 /* KILLME - the only user left is ipr */
6664 void ata_host_init(struct ata_host *host, struct device *dev,
6665 unsigned long flags, const struct ata_port_operations *ops)
6667 spin_lock_init(&host->lock);
6669 host->flags = flags;
6674 * ata_host_register - register initialized ATA host
6675 * @host: ATA host to register
6676 * @sht: template for SCSI host
6678 * Register initialized ATA host. @host is allocated using
6679 * ata_host_alloc() and fully initialized by LLD. This function
6680 * starts ports, registers @host with ATA and SCSI layers and
6681 * probes registered devices.
6684 * Inherited from calling layer (may sleep).
6687 * 0 on success, -errno otherwise.
6689 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6693 /* host must have been started */
6694 if (!(host->flags & ATA_HOST_STARTED)) {
6695 dev_printk(KERN_ERR, host->dev,
6696 "BUG: trying to register unstarted host\n");
6701 /* Blow away unused ports. This happens when LLD can't
6702 * determine the exact number of ports to allocate at allocation time. */
6705 for (i = host->n_ports; host->ports[i]; i++)
6706 kfree(host->ports[i]);
6708 /* give ports names and add SCSI hosts */
6709 for (i = 0; i < host->n_ports; i++)
6710 host->ports[i]->print_id = ata_print_id++;
6712 rc = ata_scsi_add_hosts(host, sht);
6716 /* associate with ACPI nodes */
6717 ata_acpi_associate(host);
6719 /* set cable, sata_spd_limit and report */
6720 for (i = 0; i < host->n_ports; i++) {
6721 struct ata_port *ap = host->ports[i];
6722 unsigned long xfer_mask;
6724 /* set SATA cable type if still unset */
6725 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6726 ap->cbl = ATA_CBL_SATA;
6728 /* init sata_spd_limit to the current value */
6729 sata_link_init_spd(&ap->link);
6731 /* print per-port info to dmesg */
6732 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6735 if (!ata_port_is_dummy(ap))
6736 ata_port_printk(ap, KERN_INFO,
6737 "%cATA max %s %s\n",
6738 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6739 ata_mode_string(xfer_mask),
6740 ap->link.eh_info.desc);
6742 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6745 /* perform each probe synchronously */
6746 DPRINTK("probe begin\n");
6747 for (i = 0; i < host->n_ports; i++) {
6748 struct ata_port *ap = host->ports[i];
6752 if (ap->ops->error_handler) {
6753 struct ata_eh_info *ehi = &ap->link.eh_info;
6754 unsigned long flags;
6758 /* kick EH for boot probing */
6759 spin_lock_irqsave(ap->lock, flags);
ehi->probe_mask =
6762 (1 << ata_link_max_devices(&ap->link)) - 1;
6763 ehi->action |= ATA_EH_SOFTRESET;
6764 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6766 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6767 ap->pflags |= ATA_PFLAG_LOADING;
6768 ata_port_schedule_eh(ap);
6770 spin_unlock_irqrestore(ap->lock, flags);
6772 /* wait for EH to finish */
6773 ata_port_wait_eh(ap);
6775 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6776 rc = ata_bus_probe(ap);
6777 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6780 /* FIXME: do something useful here?
6781 * Current libata behavior will
6782 * tear down everything when
6783 * the module is removed
6784 * or the h/w is unplugged.
6790 /* probes are done, now scan each port's disk(s) */
6791 DPRINTK("host probe begin\n");
6792 for (i = 0; i < host->n_ports; i++) {
6793 struct ata_port *ap = host->ports[i];
6795 ata_scsi_scan_host(ap, 1);
6802 * ata_host_activate - start host, request IRQ and register it
6803 * @host: target ATA host
6804 * @irq: IRQ to request
6805 * @irq_handler: irq_handler used when requesting IRQ
6806 * @irq_flags: irq_flags used when requesting IRQ
6807 * @sht: scsi_host_template to use when registering the host
6809 * After allocating an ATA host and initializing it, most libata
6810 * LLDs perform three steps to activate the host - start host,
6811 * request IRQ and register it. This helper takes the necessary
6812 * arguments and performs the three steps in one go.
6815 * Inherited from calling layer (may sleep).
6818 * 0 on success, -errno otherwise.
6820 int ata_host_activate(struct ata_host *host, int irq,
6821 irq_handler_t irq_handler, unsigned long irq_flags,
6822 struct scsi_host_template *sht)
6826 rc = ata_host_start(host);
6830 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6831 dev_driver_string(host->dev), host);
6835 for (i = 0; i < host->n_ports; i++)
6836 ata_port_desc(host->ports[i], "irq %d", irq);
6838 rc = ata_host_register(host, sht);
6839 /* if failed, just free the IRQ and leave ports alone */
6841 devm_free_irq(host->dev, irq, host);
6847 * ata_port_detach - Detach ATA port in preparation for device removal
6848 * @ap: ATA port to be detached
6850 * Detach all ATA devices and the associated SCSI devices of @ap;
6851 * then, remove the associated SCSI host. @ap is guaranteed to
6852 * be quiescent on return from this function.
6855 * Kernel thread context (may sleep).
6857 void ata_port_detach(struct ata_port *ap)
6859 unsigned long flags;
6860 struct ata_link *link;
6861 struct ata_device *dev;
6863 if (!ap->ops->error_handler)
6866 /* tell EH we're leaving & flush EH */
6867 spin_lock_irqsave(ap->lock, flags);
6868 ap->pflags |= ATA_PFLAG_UNLOADING;
6869 spin_unlock_irqrestore(ap->lock, flags);
6871 ata_port_wait_eh(ap);
6873 /* EH is now guaranteed to see UNLOADING, so no new device
6874 * will be attached. Disable all existing devices.
6876 spin_lock_irqsave(ap->lock, flags);
6878 ata_port_for_each_link(link, ap) {
6879 ata_link_for_each_dev(dev, link)
6880 ata_dev_disable(dev);
6883 spin_unlock_irqrestore(ap->lock, flags);
6885 /* Final freeze & EH. All in-flight commands are aborted. EH
6886 * will be skipped and retries will be terminated with bad
6889 spin_lock_irqsave(ap->lock, flags);
6890 ata_port_freeze(ap); /* won't be thawed */
6891 spin_unlock_irqrestore(ap->lock, flags);
6893 ata_port_wait_eh(ap);
6894 cancel_rearming_delayed_work(&ap->hotplug_task);
6897 /* remove the associated SCSI host */
6898 scsi_remove_host(ap->scsi_host);
6902 * ata_host_detach - Detach all ports of an ATA host
6903 * @host: Host to detach
6905 * Detach all ports of @host.
6908 * Kernel thread context (may sleep).
6910 void ata_host_detach(struct ata_host *host)
6914 for (i = 0; i < host->n_ports; i++)
6915 ata_port_detach(host->ports[i]);
6919 * ata_std_ports - initialize ioaddr with standard port offsets.
6920 * @ioaddr: IO address structure to be initialized
6922 * Utility function which initializes data_addr, error_addr,
6923 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6924 * device_addr, status_addr, and command_addr to standard offsets
6925 * relative to cmd_addr.
6927 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6930 void ata_std_ports(struct ata_ioports *ioaddr)
6932 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6933 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6934 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6935 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6936 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6937 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6938 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6939 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6940 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6941 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
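/* Editor's sketch (hypothetical LLD, not part of libata): point
 * cmd_addr and ctl_addr at the mapped command/control blocks and let
 * ata_std_ports() derive the remaining taskfile register addresses.
 */
static void example_setup_ioaddr(struct ata_ioports *ioaddr,
				 void __iomem *cmd, void __iomem *ctl)
{
	ioaddr->cmd_addr = cmd;
	ioaddr->ctl_addr = ioaddr->altstatus_addr = ctl;
	ata_std_ports(ioaddr);
}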
6948 * ata_pci_remove_one - PCI layer callback for device removal
6949 * @pdev: PCI device that was removed
6951 * PCI layer indicates to libata via this hook that a hot-unplug or
6952 * module unload event has occurred. Detach all ports. Resource
6953 * release is handled via devres.
6956 * Inherited from PCI layer (may sleep).
6958 void ata_pci_remove_one(struct pci_dev *pdev)
6960 struct device *dev = pci_dev_to_dev(pdev);
6961 struct ata_host *host = dev_get_drvdata(dev);
6963 ata_host_detach(host);
6966 /* move to PCI subsystem */
6967 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6969 unsigned long tmp = 0;
6971 switch (bits->width) {
6974 pci_read_config_byte(pdev, bits->reg, &tmp8);
6980 pci_read_config_word(pdev, bits->reg, &tmp16);
6986 pci_read_config_dword(pdev, bits->reg, &tmp32);
6997 return (tmp == bits->val) ? 1 : 0;
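/* Editor's sketch: the typical caller checks a BIOS port-enable bit
 * in PCI config space before registering a channel.  The offsets and
 * masks below are illustrative, not tied to any particular chip.
 */
static const struct pci_bits example_port_enable_bits[] = {
	{ 0x41, 1, 0x80, 0x80 },	/* reg 0x41, 1 byte wide, bit 7 */
	{ 0x43, 1, 0x80, 0x80 },	/* reg 0x43, 1 byte wide, bit 7 */
};
/* usage:
 *	if (!pci_test_config_bits(pdev, &example_port_enable_bits[port]))
 *		return -ENOENT;		(port disabled by firmware)
 */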
7001 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7003 pci_save_state(pdev);
7004 pci_disable_device(pdev);
7006 if (mesg.event == PM_EVENT_SUSPEND)
7007 pci_set_power_state(pdev, PCI_D3hot);
7010 int ata_pci_device_do_resume(struct pci_dev *pdev)
7014 pci_set_power_state(pdev, PCI_D0);
7015 pci_restore_state(pdev);
7017 rc = pcim_enable_device(pdev);
7019 dev_printk(KERN_ERR, &pdev->dev,
7020 "failed to enable device after resume (%d)\n", rc);
7024 pci_set_master(pdev);
7028 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7030 struct ata_host *host = dev_get_drvdata(&pdev->dev);
7033 rc = ata_host_suspend(host, mesg);
7037 ata_pci_device_do_suspend(pdev, mesg);
7042 int ata_pci_device_resume(struct pci_dev *pdev)
7044 struct ata_host *host = dev_get_drvdata(&pdev->dev);
7047 rc = ata_pci_device_do_resume(pdev);
7049 ata_host_resume(host);
7052 #endif /* CONFIG_PM */
7054 #endif /* CONFIG_PCI */
7057 static int __init ata_init(void)
7059 ata_probe_timeout *= HZ;
7060 ata_wq = create_workqueue("ata");
7064 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7066 destroy_workqueue(ata_wq);
7070 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7074 static void __exit ata_exit(void)
7076 destroy_workqueue(ata_wq);
7077 destroy_workqueue(ata_aux_wq);
7080 subsys_initcall(ata_init);
7081 module_exit(ata_exit);
7083 static unsigned long ratelimit_time;
7084 static DEFINE_SPINLOCK(ata_ratelimit_lock);
7086 int ata_ratelimit(void)
7089 unsigned long flags;
7091 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7093 if (time_after(jiffies, ratelimit_time)) {
7095 ratelimit_time = jiffies + (HZ/5);
7099 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
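/* Editor's sketch (hypothetical caller, not part of libata): gate a
 * potentially noisy warning to one burst per HZ/5 window.
 */
static void example_warn_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt\n");
}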
7105 * ata_wait_register - wait until register value changes
7106 * @reg: IO-mapped register
7107 * @mask: Mask to apply to read register value
7108 * @val: Wait condition
7109 * @interval_msec: polling interval in milliseconds
7110 * @timeout_msec: timeout in milliseconds
7112 * Waiting for some bits of a register to change is a common
7113 * operation for ATA controllers. This function reads 32bit LE
7114 * IO-mapped register @reg and tests for the following condition.
7116 * (*@reg & mask) != val
7118 * If the condition is met, it returns; otherwise, the process is
7119 * repeated after @interval_msec until timeout.
7122 * Kernel thread context (may sleep)
7125 * The final register value.
7127 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7128 unsigned long interval_msec,
7129 unsigned long timeout_msec)
7131 unsigned long timeout;
7134 tmp = ioread32(reg);
7136 /* Calculate timeout _after_ the first read to make sure
7137 * preceding writes reach the controller before starting to
7138 * eat away the timeout.
7140 timeout = jiffies + (timeout_msec * HZ) / 1000;
7142 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7143 msleep(interval_msec);
7144 tmp = ioread32(reg);
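/* Editor's sketch (hypothetical register, not part of libata): poll a
 * made-up iomapped status word every 50 ms for up to 1 s, returning
 * once its BUSY bit (bit 0 here) clears; ata_wait_register() spins
 * while (reg & mask) == val.
 */
static u32 example_wait_not_busy(void __iomem *my_status_reg)
{
	return ata_wait_register(my_status_reg, 0x1, 0x1, 50, 1000);
}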
7153 static void ata_dummy_noret(struct ata_port *ap) { }
7154 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7155 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7157 static u8 ata_dummy_check_status(struct ata_port *ap)
7162 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7164 return AC_ERR_SYSTEM;
7167 const struct ata_port_operations ata_dummy_port_ops = {
7168 .check_status = ata_dummy_check_status,
7169 .check_altstatus = ata_dummy_check_status,
7170 .dev_select = ata_noop_dev_select,
7171 .qc_prep = ata_noop_qc_prep,
7172 .qc_issue = ata_dummy_qc_issue,
7173 .freeze = ata_dummy_noret,
7174 .thaw = ata_dummy_noret,
7175 .error_handler = ata_dummy_noret,
7176 .post_internal_cmd = ata_dummy_qc_noret,
7177 .irq_clear = ata_dummy_noret,
7178 .port_start = ata_dummy_ret0,
7179 .port_stop = ata_dummy_noret,
7182 const struct ata_port_info ata_dummy_port_info = {
7183 .port_ops = &ata_dummy_port_ops,
7187 * libata is essentially a library of internal helper functions for
7188 * low-level ATA host controller drivers. As such, the API/ABI is
7189 * likely to change as new drivers are added and updated.
7190 * Do not depend on ABI/API stability.
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(sata_pmp_read_init_tf);
EXPORT_SYMBOL_GPL(sata_pmp_read_val);
EXPORT_SYMBOL_GPL(sata_pmp_write_init_tf);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);