 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#include <asm/semaphore.h>
#include <asm/byteorder.h>
#define DRV_VERSION	"2.10"	/* must be exactly four chars */

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
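/*
 * Illustrative sketch, not part of the original file: one way a PHY
 * debounce loop can consume the { interval, duration, timeout } triplet
 * above -- poll SStatus every @interval msecs until the DET field holds
 * the same value for @duration msecs, giving up after @timeout msecs.
 * The helper name example_deb_wait() is hypothetical.
 */
#if 0	/* example only */
static int example_deb_wait(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long timeout = jiffies + msecs_to_jiffies(params[2]);
	unsigned long stable_since = jiffies;
	u32 cur, last = 0xffffffff;
	int rc;

	while (time_before(jiffies, timeout)) {
		msleep(interval);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;			/* DET field only */

		if (cur != last) {		/* value changed: restart clock */
			last = cur;
			stable_since = jiffies;
		} else if (time_after(jiffies, stable_since +
				      msecs_to_jiffies(duration)))
			return 0;		/* stable long enough */
	}
	return -EBUSY;
}
#endif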
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

module_param(noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	Inherited from caller.

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[13] = tf->hob_nsect;
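/*
 * Illustrative sketch, not part of the original file: building the
 * Register - Host to Device FIS for a hypothetical READ DMA EXT
 * taskfile.  The 20-byte buffer size follows the SATA spec; the
 * function and variable names here are examples only.
 */
#if 0	/* example only */
static void example_fis(struct ata_device *dev, u64 block)
{
	struct ata_taskfile tf;
	u8 fis[20];			/* Register H2D FIS is 20 bytes */

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_EXT;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.lbal = block & 0xff;
	tf.lbam = (block >> 8) & 0xff;
	tf.lbah = (block >> 16) & 0xff;
	tf.hob_lbal = (block >> 24) & 0xff;
	tf.nsect = 1;

	ata_tf_to_fis(&tf, fis, 0);	/* pmp 0: no port multiplier */
	/* fis[0] == 0x27 and bit 7 of fis[1] is set => Command FIS */
}
#endif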
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	Inherited from caller.

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->hob_nsect	= fis[13];
static const u8 ata_rw_cmds[] = {
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,

	ATA_CMD_WRITE_MULTI_FUA_EXT,

	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,

	ATA_CMD_WRITE_FUA_EXT
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.

static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
	u8 cmd;
	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
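/*
 * Illustrative sketch, not part of the original file: exercising the
 * indexing scheme above.  Each protocol group spans eight command-table
 * slots (0 = multi, 8 = PIO, 16 = DMA), and fua/lba48/write select
 * within the group.
 */
#if 0	/* example only */
static void example_rwcmd(struct ata_device *dev)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.flags |= ATA_TFLAG_WRITE | ATA_TFLAG_LBA48 | ATA_TFLAG_FUA;

	/* with DMA available: index 16 + fua 4 + lba48 2 + write 1 = 23,
	 * i.e. the ATA_CMD_WRITE_FUA_EXT slot */
	if (ata_rwcmd_protocol(&tf, dev) == 0)
		/* tf.command and tf.protocol are now set */;
}
#endif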
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	Block address read from @tf.

u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.

int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
	    likely(tag != ATA_TAG_INTERNAL)) {
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
	}

	return 0;
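/*
 * Worked example of the LBA-to-CHS math above (illustrative figures,
 * not from the original file): block 5000 on a 16-head, 63-sector
 * drive.
 */
#if 0	/* example only */
static void example_chs(void)
{
	u32 block = 5000, sectors = 63, heads = 16;
	u32 track = block / sectors;		/* 79 */
	u32 cyl   = track / heads;		/* 4 */
	u32 head  = track % heads;		/* 15 */
	u32 sect  = block % sectors + 1;	/* 24 (sectors are 1-based) */

	/* the fit check then verifies cyl <= 65535, head <= 15,
	 * and 1 <= sect <= 255 */
}
#endif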
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.

static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.

static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
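/*
 * Illustrative sketch, not part of the original file: the pack/unpack
 * round trip.  PIO modes 0-4 supported gives pio_mask 0x1f, MWDMA 0-2
 * gives 0x07, UDMA 0-5 gives 0x3f; the per-class shifts and masks keep
 * the three bitfields disjoint inside one unsigned int.
 */
#if 0	/* example only */
static void example_xfermask(void)
{
	unsigned int pio, mwdma, udma;
	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */
}
#endif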
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	Matching XFER_* value, 0 if no match found.

static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	Matching xfer_mask, 0 if no match found.

static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	Matching xfer_shift, -1 if no match found.

static int ata_xfer_mode2shift(unsigned int xfer_mode)
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only the highest bit counts.
 *
 *	Determine the string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	Constant C string representing the highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".

static const char *ata_mode_string(unsigned int xfer_mask)
	static const char * const xfer_mode_str[] = {

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
static const char *sata_spd_string(unsigned int spd)
	static const char * const spd_str[] = {

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];

void ata_dev_disable(struct ata_device *dev)
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we check only those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.

ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
	struct ata_taskfile tf;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;

	else if ((device == 0) && (err == 0x81))

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)

	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
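/*
 * Minimal sketch, not the original body: the byte-order handling the
 * comment above describes.  Each 16-bit IDENTIFY word carries two ASCII
 * characters with the first one in the high byte, so output is high
 * byte then low byte regardless of host endianness.
 */
#if 0	/* example only */
static void example_id_string(const u16 *id, unsigned char *s,
			      unsigned int ofs, unsigned int len)
{
	while (len >= 2) {
		*s++ = id[ofs] >> 8;	/* first char: high byte */
		*s++ = id[ofs] & 0xff;	/* second char: low byte */
		ofs++;
		len -= 2;
	}
}
#endif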
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.

void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
static u64 ata_id_n_sectors(const u16 *id)
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no action.
 *
 *	May be used as the dev_select() entry in ata_port_operations.

void ata_noop_dev_select (struct ata_port *ap, unsigned int device)

 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);		/* needed; also flushes, for mmio */
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
				"device %u, wait %u\n", ap->id, device, wait);

	ap->ops->dev_select(ap, device);

	if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
		msleep(1);
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE

static inline void ata_dump_id(const u16 *id)
	DPRINTK("49==0x%04x "

	DPRINTK("80==0x%04x "

	DPRINTK("88==0x%04x "
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).

static unsigned int ata_id_xfermask(const u16 *id)
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
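/*
 * Usage sketch, not part of the original file: after a successful
 * IDENTIFY, the packed mask from ata_id_xfermask() can be turned into
 * a printable mode string for logging.
 */
#if 0	/* example only */
static void example_xfermask_use(struct ata_device *dev)
{
	unsigned int xfer_mask = ata_id_xfermask(dev->id);

	DPRINTK("max mode: %s\n", ata_mode_string(xfer_mask));
}
#endif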
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it is the
 *	user's (i.e., the low-level driver's) responsibility to make
 *	sure that only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *
 *	Inherited from caller.

void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */

 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	Kernel thread context (may sleep)

void ata_port_flush_task(struct ata_port *ap)
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
	struct completion *waiting = qc->private_data;

	complete(waiting);

 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	None.  Should be called with kernel context, might sleep.
 *
 *	Zero on success, AC_ERR_* mask on failure
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	spin_lock_irqsave(ap->lock, flags);

	/* We're racing with irq here.  If we lose, the
	 * following test prevents us from completing the qc
	 * twice.  If we win, the port is frozen and will be
	 * cleaned up by ->post_internal_cmd().
	 */
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;

		if (ap->ops->error_handler)
			ata_port_freeze(ap);
		else
			ata_qc_complete(qc);

		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "qc timeout (cmd 0x%x)\n", command);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "zero err_mask for failed "
				       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	None.  Should be called with kernel context, might sleep.
 *
 *	Zero on success, AC_ERR_* mask on failure

unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command that consists only of the opcode
 *	'cmd' itself, without filling any other registers.
 *
 *	Kernel thread context (may sleep).
 *
 *	Zero on success, AC_ERR_* mask on failure

unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
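/*
 * Usage sketch, not part of the original file: issuing a register-only
 * command such as a cache flush through the helper above.
 * ATA_CMD_FLUSH is the standard FLUSH CACHE opcode; error handling is
 * left to the caller.
 */
#if 0	/* example only */
static void example_flush(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);

	if (err_mask)
		ata_dev_printk(dev, KERN_WARNING,
			       "flush failed (err_mask=0x%x)\n", err_mask);
}
#endif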
 *	ata_pio_need_iordy - check if iordy needed
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non-IORDY? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240 ns per cycle */
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	Kernel thread context (may sleep)
 *
 *	0 on success, -errno otherwise.

int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

	ata_tf_init(dev, &tf);

		tf.command = ATA_CMD_ID_ATA;

		tf.command = ATA_CMD_ID_ATAPI;

		reason = "unsupported class";

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);

	if (err_mask & AC_ERR_NODEV_HINT) {
		DPRINTK("ata%u.%d: NODEV after polling detection\n",
			ap->id, dev->devno);
		return -ENOENT;
	}

		reason = "I/O error";

	swap_buf_le16(id, ATA_ID_WORDS);

	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * INITIALIZE DEVICE PARAMETERS
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might have
			 * changed.  Reread the IDENTIFY data.
			 */
			flags &= ~ATA_READID_POSTRESET;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
static inline u8 ata_dev_knobble(struct ata_device *dev)
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");

	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
static void ata_set_port_max_cmd_len(struct ata_port *ap)
	int i;

	if (ap->scsi_host) {
		unsigned int len = 0;

		for (i = 0; i < ATA_MAX_DEVICES; i++)
			len = max(len, ap->device[i].cdb_len);

		ap->scsi_host->max_cmd_len = len;
	}
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	Kernel thread context (may sleep)
 *
 *	0 on success, -errno otherwise

int ata_dev_configure(struct ata_device *dev)
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);
	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "ata%u: device %u supports DRM functions and may not be fully accessible.\n",
					       ap->id, dev->devno);
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;

			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					       "%s: %s, %s, max %s\n",
					       revbuf, modelbuf, fwrevbuf,
					       ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					       "%Lu sectors, multi %u: %s %s\n",
					       (unsigned long long)dev->n_sectors,
					       dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					       "%s: %s, %s, max %s\n",
					       revbuf, modelbuf, fwrevbuf,
					       ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					       "%Lu sectors, multi %u, CHS %u/%u/%u\n",
					       (unsigned long long)dev->n_sectors,
					       dev->multi_count, dev->cylinders,
					       dev->heads, dev->sectors);
			}
		}
	}
	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. */
		ata_dev_printk(dev, KERN_WARNING,
			       "Drive reports diagnostics failure. This may indicate a drive\n");
		ata_dev_printk(dev, KERN_WARNING,
			       "fault or invalid emulation. Contact drive vendor for information.\n");
	}
	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc)
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			       rc);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
 *	ata_bus_probe - Reset and probe ATA bus
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *
 *	PCI/etc. bus probe sem.
 *
 *	Zero on success, negative errno otherwise.

int ata_bus_probe(struct ata_port *ap)
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc, down_xfermask;
	struct ata_device *dev;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined.  Record the mode. */
	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

		tries[dev->devno] = 0;

		sata_down_spd_limit(ap);

		tries[dev->devno]--;
	if (down_xfermask &&
	    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
		tries[dev->devno] = 0;

	if (!tries[dev->devno]) {
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.

void ata_port_probe(struct ata_port *ap)
	ap->flags &= ~ATA_FLAG_DISABLED;
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.

static void sata_print_link_status(struct ata_port *ap)
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	PCI/etc. bus probe sem.

void __sata_phy_reset(struct ata_port *ap)
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		msleep(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	PCI/etc. bus probe sem.

void sata_phy_reset(struct ata_port *ap)
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
 *	ata_dev_pair - return other device on cable
 *
 *	Obtain the other device on the same cable, or NULL if none is
 *	present.

struct ata_device *ata_dev_pair(struct ata_device *adev)
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];

	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.

void ata_port_disable(struct ata_port *ap)
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	Inherited from caller.
 *
 *	0 on success, negative errno on failure

int sata_down_spd_limit(struct ata_port *ap)
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;

	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;

	mask &= (1 << spd) - 1;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
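/*
 * Worked example of the mask manipulation above (illustrative, not from
 * the original file): with sata_spd_limit == 0x3 (Gen1 and Gen2 both
 * allowed), dropping the highest bit limits the link to 1.5 Gbps.
 */
#if 0	/* example only */
static void example_spd_mask(void)
{
	u32 mask = 0x3;			/* Gen1 | Gen2 allowed */
	int highbit = fls(mask) - 1;	/* == 1 (the Gen2 bit) */

	mask &= ~(1 << highbit);	/* drop fastest: mask == 0x1 */
	/* sata_spd_string(fls(mask)) now reads "1.5 Gbps" */
}
#endif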
static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;

 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *
 *	Inherited from caller.
 *
 *	1 if SATA spd configuration is needed, 0 otherwise.

int sata_set_spd_needed(struct ata_port *ap)
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);

 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	Inherited from caller.
 *
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.

int sata_set_spd(struct ata_port *ap)
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */

/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
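/*
 * Worked example of the quantization above (illustrative, not from the
 * original file): ENOUGH(v, unit) is a round-up division, and EZ() keeps
 * an unspecified (zero) timing as zero.  The nanosecond timings are
 * scaled by 1000 and divided by T/UT, the clock periods the callers
 * pass in (picoseconds in the callers we are aware of).
 */
#if 0	/* example only */
static void example_quantize(void)
{
	/* with a 30000 ps (~33 MHz) clock, a 70 ns setup time becomes
	 * ENOUGH(70 * 1000, 30000) == ceil(70000 / 30000) == 3 cycles */
	int cycles = ENOUGH(70 * 1000, 30000);	/* == 3 */
}
#endif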
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);

static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
	const struct ata_timing *s;
	struct ata_timing p;

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is no faster than the fastest PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}
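/*
 * Worked example of the lengthening above (illustrative figures, not
 * from the original file): if act8b == 3 and rec8b == 2 cycles but
 * cyc8b == 7, the shortfall of 2 is split between the two phases so
 * that active + recovery fills the whole 8-bit cycle.
 */
#if 0	/* example only */
static void example_lengthen(void)
{
	struct ata_timing t = { .act8b = 3, .rec8b = 2, .cyc8b = 7 };

	if (t.act8b + t.rec8b < t.cyc8b) {
		t.act8b += (t.cyc8b - (t.act8b + t.rec8b)) / 2;	/* -> 4 */
		t.rec8b = t.cyc8b - t.act8b;			/* -> 3 */
	}
	/* 4 + 3 == 7 == cyc8b */
}
#endif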
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@force_pio0: Force PIO0
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	Inherited from caller.
 *
 *	0 on success, negative errno on failure

int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
	unsigned long xfer_mask;
	int highbit;

	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
				      dev->udma_mask);

	/* don't gear down to MWDMA from UDMA, go directly to PIO */
	if (xfer_mask & ATA_MASK_UDMA)
		xfer_mask &= ~ATA_MASK_MWDMA;

	highbit = fls(xfer_mask) - 1;
	xfer_mask &= ~(1 << highbit);
	if (force_pio0)
		xfer_mask &= 1 << ATA_SHIFT_PIO;

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
		       ata_mode_string(xfer_mask));

	return 0;
static int ata_dev_set_mode(struct ata_device *dev)
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	PCI/etc. bus probe sem.
 *
 *	0 on success, negative errno otherwise

int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
	}

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = 1;

	/* step 5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	spin_lock_irqsave(host lock)

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
2512 * ata_busy_sleep - sleep until BSY clears, or timeout
2513 * @ap: port containing status register to be polled
2514 * @tmout_pat: impatience timeout
2515 * @tmout: overall timeout
2517 * Sleep until ATA Status register bit BSY clears,
2518 * or a timeout occurs.
2521 * Kernel thread context (may sleep).
2524 * 0 on success, -errno otherwise.
2526 int ata_busy_sleep(struct ata_port *ap,
2527 unsigned long tmout_pat, unsigned long tmout)
2529 unsigned long timer_start, timeout;
2532 status = ata_busy_wait(ap, ATA_BUSY, 300);
2533 timer_start = jiffies;
2534 timeout = timer_start + tmout_pat;
2535 while (status != 0xff && (status & ATA_BUSY) &&
2536 time_before(jiffies, timeout)) {
2538 status = ata_busy_wait(ap, ATA_BUSY, 3);
2541 if (status != 0xff && (status & ATA_BUSY))
2542 ata_port_printk(ap, KERN_WARNING,
2543 "port is slow to respond, please be patient "
2544 "(Status 0x%x)\n", status);
2546 timeout = timer_start + tmout;
2547 while (status != 0xff && (status & ATA_BUSY) &&
2548 time_before(jiffies, timeout)) {
2550 status = ata_chk_status(ap);
2556 if (status & ATA_BUSY) {
2557 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2558 "(%lu secs, Status 0x%x)\n",
2559 tmout / HZ, status);
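/*
 * Usage sketch (hypothetical caller): this is how the reset paths
 * below wait out BSY, warning after the quick timeout but only
 * failing after the full boot timeout.
 */
static int example_wait_not_busy(struct ata_port *ap)
{
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
		return -EBUSY;	/* port never dropped BSY */
	return 0;
}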
2566 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2568 struct ata_ioports *ioaddr = &ap->ioaddr;
2569 unsigned int dev0 = devmask & (1 << 0);
2570 unsigned int dev1 = devmask & (1 << 1);
2571 unsigned long timeout;
2573 /* if device 0 was found in ata_devchk, wait for its
2577 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2579 /* if device 1 was found in ata_devchk, wait for
2580 * register access, then wait for BSY to clear
2582 timeout = jiffies + ATA_TMOUT_BOOT;
2586 ap->ops->dev_select(ap, 1);
2587 nsect = ioread8(ioaddr->nsect_addr);
2588 lbal = ioread8(ioaddr->lbal_addr);
2589 if ((nsect == 1) && (lbal == 1))
2591 if (time_after(jiffies, timeout)) {
2595 msleep(50); /* give drive a breather */
2598 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2600 /* is all this really necessary? */
2601 ap->ops->dev_select(ap, 0);
2603 ap->ops->dev_select(ap, 1);
2605 ap->ops->dev_select(ap, 0);
2608 static unsigned int ata_bus_softreset(struct ata_port *ap,
2609 unsigned int devmask)
2611 struct ata_ioports *ioaddr = &ap->ioaddr;
2613 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2615 /* software reset. causes dev0 to be selected */
2616 iowrite8(ap->ctl, ioaddr->ctl_addr);
2617 udelay(20); /* FIXME: flush */
2618 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2619 udelay(20); /* FIXME: flush */
2620 iowrite8(ap->ctl, ioaddr->ctl_addr);
2622 /* spec mandates ">= 2ms" before checking status.
2623 * We wait 150ms, because that was the magic delay used for
2624 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2625 * between when the ATA command register is written, and then
2626 * status is checked. Because waiting for "a while" before
2627 * checking status is fine, post SRST, we perform this magic
2628 * delay here as well.
2630 * Old drivers/ide uses the 2ms rule and then waits for ready
2634 /* Before we perform post reset processing we want to see if
2635 * the bus shows 0xFF because the odd clown forgets the D7
2636 * pulldown resistor.
2638 if (ata_check_status(ap) == 0xFF)
2641 ata_bus_post_reset(ap, devmask);
2647 * ata_bus_reset - reset host port and associated ATA channel
2648 * @ap: port to reset
2650 * This is typically the first time we actually start issuing
2651 * commands to the ATA channel. We wait for BSY to clear, then
2652 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2653 * result. Determine what devices, if any, are on the channel
2654 * by looking at the device 0/1 error register. Look at the signature
2655 * stored in each device's taskfile registers, to determine if
2656 * the device is ATA or ATAPI.
2659 * PCI/etc. bus probe sem.
2660 * Obtains host lock.
2663 * Sets ATA_FLAG_DISABLED if bus reset fails.
2666 void ata_bus_reset(struct ata_port *ap)
2668 struct ata_ioports *ioaddr = &ap->ioaddr;
2669 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2671 unsigned int dev0, dev1 = 0, devmask = 0;
2673 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2675 /* determine if device 0/1 are present */
2676 if (ap->flags & ATA_FLAG_SATA_RESET)
2679 dev0 = ata_devchk(ap, 0);
2681 dev1 = ata_devchk(ap, 1);
2685 devmask |= (1 << 0);
2687 devmask |= (1 << 1);
2689 /* select device 0 again */
2690 ap->ops->dev_select(ap, 0);
2692 /* issue bus reset */
2693 if (ap->flags & ATA_FLAG_SRST)
2694 if (ata_bus_softreset(ap, devmask))
2698 * determine by signature whether we have ATA or ATAPI devices
2700 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2701 if ((slave_possible) && (err != 0x81))
2702 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2704 /* re-enable interrupts */
2705 ap->ops->irq_on(ap);
2707 /* is double-select really necessary? */
2708 if (ap->device[1].class != ATA_DEV_NONE)
2709 ap->ops->dev_select(ap, 1);
2710 if (ap->device[0].class != ATA_DEV_NONE)
2711 ap->ops->dev_select(ap, 0);
2713 /* if no devices were detected, disable this port */
2714 if ((ap->device[0].class == ATA_DEV_NONE) &&
2715 (ap->device[1].class == ATA_DEV_NONE))
2718 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2719 /* set up device control for ATA_FLAG_SATA_RESET */
2720 iowrite8(ap->ctl, ioaddr->ctl_addr);
2727 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2728 ap->ops->port_disable(ap);
2734 * sata_phy_debounce - debounce SATA phy status
2735 * @ap: ATA port to debounce SATA phy status for
2736 * @params: timing parameters { interval, duration, timeout } in msec
2738 * Make sure SStatus of @ap reaches stable state, determined by
2739 * holding the same value where DET is not 1 for @duration polled
2740 * every @interval, before @timeout. The timeout constrains the
2741 * beginning of the stable state. Because, after hot unplugging,
2742 * DET gets stuck at 1 on some controllers, this function waits
2743 * until timeout and then returns 0 if DET is stable at 1.
2746 * Kernel thread context (may sleep)
2749 * 0 on success, -errno on failure.
2751 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2753 unsigned long interval_msec = params[0];
2754 unsigned long duration = params[1] * HZ / 1000;
2755 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2756 unsigned long last_jiffies;
2760 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2765 last_jiffies = jiffies;
2768 msleep(interval_msec);
2769 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2775 if (cur == 1 && time_before(jiffies, timeout))
2777 if (time_after(jiffies, last_jiffies + duration))
2782 /* unstable, start over */
2784 last_jiffies = jiffies;
2787 if (time_after(jiffies, timeout))
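/*
 * Usage sketch: debounce a link with the normal timings exported at
 * the top of this file; a hotplug path would pass
 * sata_deb_timing_hotplug instead, as ata_wait_spinup() below does.
 * The wrapper itself is illustrative only.
 */
static int example_debounce_link(struct ata_port *ap)
{
	return sata_phy_debounce(ap, sata_deb_timing_normal);
}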
2793 * sata_phy_resume - resume SATA phy
2794 * @ap: ATA port to resume SATA phy for
2795 * @params: timing parameters { interval, duration, timeout } in msec
2797 * Resume SATA phy of @ap and debounce it.
2800 * Kernel thread context (may sleep)
2803 * 0 on success, -errno on failure.
2805 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2810 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2813 scontrol = (scontrol & 0x0f0) | 0x300;
2815 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2818 /* Some PHYs react badly if SStatus is pounded immediately
2819 * after resuming. Delay 200ms before debouncing.
2823 return sata_phy_debounce(ap, params);
2826 static void ata_wait_spinup(struct ata_port *ap)
2828 struct ata_eh_context *ehc = &ap->eh_context;
2829 unsigned long end, secs;
2832 /* first, debounce phy if SATA */
2833 if (ap->cbl == ATA_CBL_SATA) {
2834 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
2836 /* if debounced successfully and offline, no need to wait */
2837 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2841 /* okay, let's give the drive time to spin up */
2842 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2843 secs = ((end - jiffies) + HZ - 1) / HZ;
2845 if (time_after(jiffies, end))
2849 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2850 "(%lu secs)\n", secs);
2852 schedule_timeout_uninterruptible(end - jiffies);
2856 * ata_std_prereset - prepare for reset
2857 * @ap: ATA port to be reset
2859 * @ap is about to be reset. Initialize it.
2862 * Kernel thread context (may sleep)
2865 * 0 on success, -errno otherwise.
2867 int ata_std_prereset(struct ata_port *ap)
2869 struct ata_eh_context *ehc = &ap->eh_context;
2870 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2873 /* handle link resume & hotplug spinup */
2874 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2875 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2876 ehc->i.action |= ATA_EH_HARDRESET;
2878 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2879 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2880 ata_wait_spinup(ap);
2882 /* if we're about to do hardreset, nothing more to do */
2883 if (ehc->i.action & ATA_EH_HARDRESET)
2886 /* if SATA, resume phy */
2887 if (ap->cbl == ATA_CBL_SATA) {
2888 rc = sata_phy_resume(ap, timing);
2889 if (rc && rc != -EOPNOTSUPP) {
2890 /* phy resume failed */
2891 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2892 "link for reset (errno=%d)\n", rc);
2897 /* Wait for !BSY if the controller can wait for the first D2H
2898 * Reg FIS and we don't know that no device is attached.
2900 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2901 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2907 * ata_std_softreset - reset host port via ATA SRST
2908 * @ap: port to reset
2909 * @classes: resulting classes of attached devices
2911 * Reset host port using ATA SRST.
2914 * Kernel thread context (may sleep)
2917 * 0 on success, -errno otherwise.
2919 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
2921 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2922 unsigned int devmask = 0, err_mask;
2927 if (ata_port_offline(ap)) {
2928 classes[0] = ATA_DEV_NONE;
2932 /* determine if device 0/1 are present */
2933 if (ata_devchk(ap, 0))
2934 devmask |= (1 << 0);
2935 if (slave_possible && ata_devchk(ap, 1))
2936 devmask |= (1 << 1);
2938 /* select device 0 again */
2939 ap->ops->dev_select(ap, 0);
2941 /* issue bus reset */
2942 DPRINTK("about to softreset, devmask=%x\n", devmask);
2943 err_mask = ata_bus_softreset(ap, devmask);
2945 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2950 /* determine by signature whether we have ATA or ATAPI devices */
2951 classes[0] = ata_dev_try_classify(ap, 0, &err);
2952 if (slave_possible && err != 0x81)
2953 classes[1] = ata_dev_try_classify(ap, 1, &err);
2956 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2961 * sata_port_hardreset - reset port via SATA phy reset
2962 * @ap: port to reset
2963 * @timing: timing parameters { interval, duration, timeout } in msec
2965 * SATA phy-reset host port using DET bits of SControl register.
2968 * Kernel thread context (may sleep)
2971 * 0 on success, -errno otherwise.
2973 int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
2980 if (sata_set_spd_needed(ap)) {
2981 /* SATA spec says nothing about how to reconfigure
2982 * spd. To be on the safe side, turn off phy during
2983 * reconfiguration. This works for at least ICH7 AHCI
2986 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2989 scontrol = (scontrol & 0x0f0) | 0x304;
2991 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2997 /* issue phy wake/reset */
2998 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3001 scontrol = (scontrol & 0x0f0) | 0x301;
3003 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3006 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3007 * 10.4.2 says at least 1 ms.
3011 /* bring phy back */
3012 rc = sata_phy_resume(ap, timing);
3014 DPRINTK("EXIT, rc=%d\n", rc);
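/*
 * Worked example of the SControl arithmetic above (explanatory only):
 *
 *	scontrol = (scontrol & 0x0f0) | 0x301;
 *
 * keeps bits 7:4 (SPD, the speed limit), sets DET (bits 3:0) to 1 to
 * request COMRESET, and sets IPM (bits 11:8) to 3 to disable the
 * partial and slumber power states.  The 0x304 value used in the
 * spd-reconfiguration branch sets DET to 4 instead, taking the phy
 * offline while SPD is rewritten.
 */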
3019 * sata_std_hardreset - reset host port via SATA phy reset
3020 * @ap: port to reset
3021 * @class: resulting class of attached device
3023 * SATA phy-reset host port using DET bits of SControl register,
3024 * wait for !BSY and classify the attached device.
3027 * Kernel thread context (may sleep)
3030 * 0 on success, -errno otherwise.
3032 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3034 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3040 rc = sata_port_hardreset(ap, timing);
3042 ata_port_printk(ap, KERN_ERR,
3043 "COMRESET failed (errno=%d)\n", rc);
3047 /* TODO: phy layer with polling, timeouts, etc. */
3048 if (ata_port_offline(ap)) {
3049 *class = ATA_DEV_NONE;
3050 DPRINTK("EXIT, link offline\n");
3054 /* wait a while before checking status, see SRST for more info */
3057 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
3058 ata_port_printk(ap, KERN_ERR,
3059 "COMRESET failed (device not ready)\n");
3063 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3065 *class = ata_dev_try_classify(ap, 0, NULL);
3067 DPRINTK("EXIT, class=%u\n", *class);
3072 * ata_std_postreset - standard postreset callback
3073 * @ap: the target ata_port
3074 * @classes: classes of attached devices
3076 * This function is invoked after a successful reset. Note that
3077 * the device might have been reset more than once using
3078 * different reset methods before postreset is invoked.
3081 * Kernel thread context (may sleep)
3083 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3089 /* print link status */
3090 sata_print_link_status(ap);
3093 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3094 sata_scr_write(ap, SCR_ERROR, serror);
3096 /* re-enable interrupts */
3097 if (!ap->ops->error_handler)
3098 ap->ops->irq_on(ap);
3100 /* is double-select really necessary? */
3101 if (classes[0] != ATA_DEV_NONE)
3102 ap->ops->dev_select(ap, 1);
3103 if (classes[1] != ATA_DEV_NONE)
3104 ap->ops->dev_select(ap, 0);
3106 /* bail out if no device is present */
3107 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3108 DPRINTK("EXIT, no device\n");
3112 /* set up device control */
3113 if (ap->ioaddr.ctl_addr)
3114 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
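/*
 * Sketch of how a LLDD typically wires these standard reset methods
 * together from its ->error_handler() hook.  This assumes the
 * ata_do_eh() helper from libata-eh; the function below is
 * illustrative, not a real entry point.
 */
static void example_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}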
3120 * ata_dev_same_device - Determine whether new ID matches configured device
3121 * @dev: device to compare against
3122 * @new_class: class of the new device
3123 * @new_id: IDENTIFY page of the new device
3125 * Compare @new_class and @new_id against @dev and determine
3126 * whether @dev is the device indicated by @new_class and
3133 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3135 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3138 const u16 *old_id = dev->id;
3139 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3140 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3143 if (dev->class != new_class) {
3144 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3145 dev->class, new_class);
3149 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3150 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3151 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3152 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3153 new_n_sectors = ata_id_n_sectors(new_id);
3155 if (strcmp(model[0], model[1])) {
3156 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3157 "'%s' != '%s'\n", model[0], model[1]);
3161 if (strcmp(serial[0], serial[1])) {
3162 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3163 "'%s' != '%s'\n", serial[0], serial[1]);
3167 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
3168 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3170 (unsigned long long)dev->n_sectors,
3171 (unsigned long long)new_n_sectors);
3179 * ata_dev_revalidate - Revalidate ATA device
3180 * @dev: device to revalidate
3181 * @readid_flags: read ID flags
3183 * Re-read IDENTIFY page and make sure @dev is still attached to
3187 * Kernel thread context (may sleep)
3190 * 0 on success, negative errno otherwise
3192 int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3194 unsigned int class = dev->class;
3195 u16 *id = (void *)dev->ap->sector_buf;
3198 if (!ata_dev_enabled(dev)) {
3204 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3208 /* is the device still there? */
3209 if (!ata_dev_same_device(dev, class, id)) {
3214 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3216 /* configure device according to the new ID */
3217 rc = ata_dev_configure(dev);
3222 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3226 struct ata_blacklist_entry {
3227 const char *model_num;
3228 const char *model_rev;
3229 unsigned long horkage;
3232 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3233 /* Devices with DMA related problems under Linux */
3234 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3235 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3236 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3237 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3238 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3239 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3240 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3241 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3242 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3243 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3244 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3245 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3246 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3247 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3248 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3249 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3250 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3251 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3252 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3253 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3254 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3255 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3256 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3257 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3258 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3259 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3260 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3261 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3262 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3263 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3265 /* Devices we expect to fail diagnostics */
3267 /* Devices where NCQ should be avoided */
3269 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3271 /* Devices with NCQ limits */
3277 unsigned long ata_device_blacklisted(const struct ata_device *dev)
3279 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3280 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3281 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3283 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3284 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3286 while (ad->model_num) {
3287 if (!strcmp(ad->model_num, model_num)) {
3288 if (ad->model_rev == NULL)
3290 if (!strcmp(ad->model_rev, model_rev))
3298 static int ata_dma_blacklisted(const struct ata_device *dev)
3300 /* We don't support polling DMA.
3301 * DMA-blacklist those ATAPI devices with CDB-intr (and use PIO)
3302 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3304 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3305 (dev->flags & ATA_DFLAG_CDB_INTR))
3307 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
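/*
 * Companion sketch for the NCQ side of the same table: configuration
 * paths can test the NONCQ horkage the same way (illustrative helper,
 * not referenced anywhere).
 */
static int example_ncq_blacklisted(const struct ata_device *dev)
{
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) ? 1 : 0;
}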
3311 * ata_dev_xfermask - Compute supported xfermask of the given device
3312 * @dev: Device to compute xfermask for
3314 * Compute supported xfermask of @dev and store it in
3315 * dev->*_mask. This function is responsible for applying all
3316 * known limits including host controller limits, device
3322 static void ata_dev_xfermask(struct ata_device *dev)
3324 struct ata_port *ap = dev->ap;
3325 struct ata_host *host = ap->host;
3326 unsigned long xfer_mask;
3328 /* controller modes available */
3329 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3330 ap->mwdma_mask, ap->udma_mask);
3332 /* Apply cable rule here. Don't apply it early because when
3333 * we handle hot plug the cable type can itself change.
3335 if (ap->cbl == ATA_CBL_PATA40)
3336 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3337 /* Apply the drive-side cable rule. When the host reports an unknown
3338 * or 80-pin cable, the drive's own report is checked as well. Cases where
3339 * a 40-wire cable is known to run safely at 80-wire speeds are not checked here.
3341 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3342 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3345 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3346 dev->mwdma_mask, dev->udma_mask);
3347 xfer_mask &= ata_id_xfermask(dev->id);
3350 * CFA Advanced TrueIDE timings are not allowed on a shared
3353 if (ata_dev_pair(dev)) {
3354 /* No PIO5 or PIO6 */
3355 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3356 /* No MWDMA3 or MWDMA 4 */
3357 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3360 if (ata_dma_blacklisted(dev)) {
3361 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3362 ata_dev_printk(dev, KERN_WARNING,
3363 "device is on DMA blacklist, disabling DMA\n");
3366 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
3367 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3368 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3369 "other device, disabling DMA\n");
3372 if (ap->ops->mode_filter)
3373 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3375 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3376 &dev->mwdma_mask, &dev->udma_mask);
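/*
 * Worked example of the cable rule above (explanatory only): the UDMA
 * field of a packed mask starts at ATA_SHIFT_UDMA, so
 *
 *	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 *
 * clears the bits for UDMA3 and above (0xF8 == 11111000b), capping a
 * 40-wire cable at UDMA2 (33 MB/s) while leaving PIO and MWDMA alone.
 * The CFA clause works the same way: 0x03 shifted up to PIO5 or
 * MWDMA3 strips exactly the top two timing modes.
 */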
3380 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3381 * @dev: Device to which command will be sent
3383 * Issue SET FEATURES - XFER MODE command to device @dev
3387 * PCI/etc. bus probe sem.
3390 * 0 on success, AC_ERR_* mask otherwise.
3393 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3395 struct ata_taskfile tf;
3396 unsigned int err_mask;
3398 /* set up set-features taskfile */
3399 DPRINTK("set features - xfer mode\n");
3401 ata_tf_init(dev, &tf);
3402 tf.command = ATA_CMD_SET_FEATURES;
3403 tf.feature = SETFEATURES_XFER;
3404 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3405 tf.protocol = ATA_PROT_NODATA;
3406 tf.nsect = dev->xfer_mode;
3408 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3410 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3415 * ata_dev_init_params - Issue INIT DEV PARAMS command
3416 * @dev: Device to which command will be sent
3417 * @heads: Number of heads (taskfile parameter)
3418 * @sectors: Number of sectors (taskfile parameter)
3421 * Kernel thread context (may sleep)
3424 * 0 on success, AC_ERR_* mask otherwise.
3426 static unsigned int ata_dev_init_params(struct ata_device *dev,
3427 u16 heads, u16 sectors)
3429 struct ata_taskfile tf;
3430 unsigned int err_mask;
3432 /* Number of sectors per track 1-255. Number of heads 1-16 */
3433 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3434 return AC_ERR_INVALID;
3436 /* set up init dev params taskfile */
3437 DPRINTK("init dev params\n");
3439 ata_tf_init(dev, &tf);
3440 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3441 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3442 tf.protocol = ATA_PROT_NODATA;
3444 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3446 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3448 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3453 * ata_sg_clean - Unmap DMA memory associated with command
3454 * @qc: Command containing DMA memory to be released
3456 * Unmap all mapped DMA memory associated with this command.
3459 * spin_lock_irqsave(host lock)
3461 void ata_sg_clean(struct ata_queued_cmd *qc)
3463 struct ata_port *ap = qc->ap;
3464 struct scatterlist *sg = qc->__sg;
3465 int dir = qc->dma_dir;
3466 void *pad_buf = NULL;
3468 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3469 WARN_ON(sg == NULL);
3471 if (qc->flags & ATA_QCFLAG_SINGLE)
3472 WARN_ON(qc->n_elem > 1);
3474 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3476 /* if we padded the buffer out to 32-bit bound, and data
3477 * xfer direction is from-device, we must copy from the
3478 * pad buffer back into the supplied buffer
3480 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3481 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3483 if (qc->flags & ATA_QCFLAG_SG) {
3485 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3486 /* restore last sg */
3487 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3489 struct scatterlist *psg = &qc->pad_sgent;
3490 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3491 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3492 kunmap_atomic(addr, KM_IRQ0);
3496 dma_unmap_single(ap->dev,
3497 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3500 sg->length += qc->pad_len;
3502 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3503 pad_buf, qc->pad_len);
3506 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3511 * ata_fill_sg - Fill PCI IDE PRD table
3512 * @qc: Metadata associated with taskfile to be transferred
3514 * Fill PCI IDE PRD (scatter-gather) table with segments
3515 * associated with the current disk command.
3518 * spin_lock_irqsave(host lock)
3521 static void ata_fill_sg(struct ata_queued_cmd *qc)
3523 struct ata_port *ap = qc->ap;
3524 struct scatterlist *sg;
3527 WARN_ON(qc->__sg == NULL);
3528 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3531 ata_for_each_sg(sg, qc) {
3535 /* determine if physical DMA addr spans 64K boundary.
3536 * Note h/w doesn't support 64-bit, so we unconditionally
3537 * truncate dma_addr_t to u32.
3539 addr = (u32) sg_dma_address(sg);
3540 sg_len = sg_dma_len(sg);
3543 offset = addr & 0xffff;
3545 if ((offset + sg_len) > 0x10000)
3546 len = 0x10000 - offset;
3548 ap->prd[idx].addr = cpu_to_le32(addr);
3549 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3550 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3559 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
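/*
 * Worked example of the 64K splitting above (explanatory only): an sg
 * entry with DMA address 0x1fff0 and length 0x4000 sits at offset
 * 0xfff0 within its 64K region, so the first PRD carries
 * 0x10000 - 0xfff0 = 0x10 bytes and the remaining 0x3ff0 bytes go
 * into a second PRD starting at 0x20000.  A low word of zero in
 * flags_len encodes a full 64K transfer per the bus-master IDE PRD
 * format.
 */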
3562 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3563 * @qc: Metadata associated with taskfile to check
3565 * Allow low-level driver to filter ATA PACKET commands, returning
3566 * a status indicating whether or not it is OK to use DMA for the
3567 * supplied PACKET command.
3570 * spin_lock_irqsave(host lock)
3572 * RETURNS: 0 when ATAPI DMA can be used
3575 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3577 struct ata_port *ap = qc->ap;
3578 int rc = 0; /* Assume ATAPI DMA is OK by default */
3580 if (ap->ops->check_atapi_dma)
3581 rc = ap->ops->check_atapi_dma(qc);
3586 * ata_qc_prep - Prepare taskfile for submission
3587 * @qc: Metadata associated with taskfile to be prepared
3589 * Prepare ATA taskfile for submission.
3592 * spin_lock_irqsave(host lock)
3594 void ata_qc_prep(struct ata_queued_cmd *qc)
3596 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3602 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3605 * ata_sg_init_one - Associate command with memory buffer
3606 * @qc: Command to be associated
3607 * @buf: Memory buffer
3608 * @buflen: Length of memory buffer, in bytes.
3610 * Initialize the data-related elements of queued_cmd @qc
3611 * to point to a single memory buffer, @buf of byte length @buflen.
3614 * spin_lock_irqsave(host lock)
3617 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3619 qc->flags |= ATA_QCFLAG_SINGLE;
3621 qc->__sg = &qc->sgent;
3623 qc->orig_n_elem = 1;
3625 qc->nbytes = buflen;
3627 sg_init_one(&qc->sgent, buf, buflen);
3631 * ata_sg_init - Associate command with scatter-gather table.
3632 * @qc: Command to be associated
3633 * @sg: Scatter-gather table.
3634 * @n_elem: Number of elements in s/g table.
3636 * Initialize the data-related elements of queued_cmd @qc
3637 * to point to a scatter-gather table @sg, containing @n_elem
3641 * spin_lock_irqsave(host lock)
3644 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3645 unsigned int n_elem)
3647 qc->flags |= ATA_QCFLAG_SG;
3649 qc->n_elem = n_elem;
3650 qc->orig_n_elem = n_elem;
3654 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3655 * @qc: Command with memory buffer to be mapped.
3657 * DMA-map the memory buffer associated with queued_cmd @qc.
3660 * spin_lock_irqsave(host lock)
3663 * Zero on success, negative on error.
3666 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3668 struct ata_port *ap = qc->ap;
3669 int dir = qc->dma_dir;
3670 struct scatterlist *sg = qc->__sg;
3671 dma_addr_t dma_address;
3674 /* we must lengthen transfers to end on a 32-bit boundary */
3675 qc->pad_len = sg->length & 3;
3677 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3678 struct scatterlist *psg = &qc->pad_sgent;
3680 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3682 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3684 if (qc->tf.flags & ATA_TFLAG_WRITE)
3685 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3688 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3689 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3691 sg->length -= qc->pad_len;
3692 if (sg->length == 0)
3695 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3696 sg->length, qc->pad_len);
3704 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3706 if (dma_mapping_error(dma_address)) {
3708 sg->length += qc->pad_len;
3712 sg_dma_address(sg) = dma_address;
3713 sg_dma_len(sg) = sg->length;
3716 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3717 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3723 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3724 * @qc: Command with scatter-gather table to be mapped.
3726 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3729 * spin_lock_irqsave(host lock)
3732 * Zero on success, negative on error.
3736 static int ata_sg_setup(struct ata_queued_cmd *qc)
3738 struct ata_port *ap = qc->ap;
3739 struct scatterlist *sg = qc->__sg;
3740 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3741 int n_elem, pre_n_elem, dir, trim_sg = 0;
3743 VPRINTK("ENTER, ata%u\n", ap->id);
3744 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3746 /* we must lengthen transfers to end on a 32-bit boundary */
3747 qc->pad_len = lsg->length & 3;
3749 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3750 struct scatterlist *psg = &qc->pad_sgent;
3751 unsigned int offset;
3753 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3755 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3758 * psg->page/offset are used to copy to-be-written
3759 * data in this function or read data in ata_sg_clean.
3761 offset = lsg->offset + lsg->length - qc->pad_len;
3762 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3763 psg->offset = offset_in_page(offset);
3765 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3766 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3767 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3768 kunmap_atomic(addr, KM_IRQ0);
3771 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3772 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3774 lsg->length -= qc->pad_len;
3775 if (lsg->length == 0)
3778 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3779 qc->n_elem - 1, lsg->length, qc->pad_len);
3782 pre_n_elem = qc->n_elem;
3783 if (trim_sg && pre_n_elem)
3792 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3794 /* restore last sg */
3795 lsg->length += qc->pad_len;
3799 DPRINTK("%d sg elements mapped\n", n_elem);
3802 qc->n_elem = n_elem;
3808 * swap_buf_le16 - swap halves of 16-bit words in place
3809 * @buf: Buffer to swap
3810 * @buf_words: Number of 16-bit words in buffer.
3812 * Swap halves of 16-bit words if needed to convert from
3813 * little-endian byte order to native cpu byte order, or
3817 * Inherited from caller.
3819 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3824 for (i = 0; i < buf_words; i++)
3825 buf[i] = le16_to_cpu(buf[i]);
3826 #endif /* __BIG_ENDIAN */
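/*
 * Usage sketch: IDENTIFY data arrives as 256 little-endian words, so
 * callers fix it up for the local CPU like this (a no-op on
 * little-endian machines, per the #ifdef above; the wrapper is
 * illustrative only).
 */
static void example_fixup_id(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);
}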
3830 * ata_data_xfer - Transfer data by PIO
3831 * @adev: device to target
3832 * @buf: data buffer
3833 * @buflen: buffer length
3834 * @write_data: read/write
3836 * Transfer data from/to the device data register by PIO.
3839 * Inherited from caller.
3841 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3842 unsigned int buflen, int write_data)
3844 struct ata_port *ap = adev->ap;
3845 unsigned int words = buflen >> 1;
3847 /* Transfer multiple of 2 bytes */
3849 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
3851 ioread16_rep(ap->ioaddr.data_addr, buf, words);
3853 /* Transfer trailing 1 byte, if any. */
3854 if (unlikely(buflen & 0x01)) {
3855 u16 align_buf[1] = { 0 };
3856 unsigned char *trailing_buf = buf + buflen - 1;
3859 memcpy(align_buf, trailing_buf, 1);
3860 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3862 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
3863 memcpy(trailing_buf, align_buf, 1);
3869 * ata_data_xfer_noirq - Transfer data by PIO
3870 * @adev: device to target
3871 * @buf: data buffer
3872 * @buflen: buffer length
3873 * @write_data: read/write
3875 * Transfer data from/to the device data register by PIO. Do the
3876 * transfer with interrupts disabled.
3879 * Inherited from caller.
3881 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3882 unsigned int buflen, int write_data)
3884 unsigned long flags;
3885 local_irq_save(flags);
3886 ata_data_xfer(adev, buf, buflen, write_data);
3887 local_irq_restore(flags);
3892 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3893 * @qc: Command on going
3895 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3898 * Inherited from caller.
3901 static void ata_pio_sector(struct ata_queued_cmd *qc)
3903 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3904 struct scatterlist *sg = qc->__sg;
3905 struct ata_port *ap = qc->ap;
3907 unsigned int offset;
3910 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
3911 ap->hsm_task_state = HSM_ST_LAST;
3913 page = sg[qc->cursg].page;
3914 offset = sg[qc->cursg].offset + qc->cursg_ofs;
3916 /* get the current page and offset */
3917 page = nth_page(page, (offset >> PAGE_SHIFT));
3918 offset %= PAGE_SIZE;
3920 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3922 if (PageHighMem(page)) {
3923 unsigned long flags;
3925 /* FIXME: use a bounce buffer */
3926 local_irq_save(flags);
3927 buf = kmap_atomic(page, KM_IRQ0);
3929 /* do the actual data transfer */
3930 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3932 kunmap_atomic(buf, KM_IRQ0);
3933 local_irq_restore(flags);
3935 buf = page_address(page);
3936 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
3939 qc->curbytes += ATA_SECT_SIZE;
3940 qc->cursg_ofs += ATA_SECT_SIZE;
3942 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
3949 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3950 * @qc: Command on going
3952 * Transfer one or many ATA_SECT_SIZE of data from/to the
3953 * ATA device for the DRQ request.
3956 * Inherited from caller.
3959 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3961 if (is_multi_taskfile(&qc->tf)) {
3962 /* READ/WRITE MULTIPLE */
3965 WARN_ON(qc->dev->multi_count == 0);
3967 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
3968 qc->dev->multi_count);
3976 * atapi_send_cdb - Write CDB bytes to hardware
3977 * @ap: Port to which ATAPI device is attached.
3978 * @qc: Taskfile currently active
3980 * When the device has indicated its readiness to accept
3981 * a CDB, this function is called to send the CDB.
3987 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3990 DPRINTK("send cdb\n");
3991 WARN_ON(qc->dev->cdb_len < 12);
3993 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
3994 ata_altstatus(ap); /* flush */
3996 switch (qc->tf.protocol) {
3997 case ATA_PROT_ATAPI:
3998 ap->hsm_task_state = HSM_ST;
4000 case ATA_PROT_ATAPI_NODATA:
4001 ap->hsm_task_state = HSM_ST_LAST;
4003 case ATA_PROT_ATAPI_DMA:
4004 ap->hsm_task_state = HSM_ST_LAST;
4005 /* initiate bmdma */
4006 ap->ops->bmdma_start(qc);
4012 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4013 * @qc: Command on going
4014 * @bytes: number of bytes
4016 * Transfer data from/to the ATAPI device.
4019 * Inherited from caller.
4023 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4025 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4026 struct scatterlist *sg = qc->__sg;
4027 struct ata_port *ap = qc->ap;
4030 unsigned int offset, count;
4032 if (qc->curbytes + bytes >= qc->nbytes)
4033 ap->hsm_task_state = HSM_ST_LAST;
4036 if (unlikely(qc->cursg >= qc->n_elem)) {
4038 * The end of qc->sg is reached and the device expects
4039 * more data to transfer. In order not to overrun qc->sg
4040 * and to fulfill the length specified in the byte count register:
4041 * - for the read case, discard trailing data from the device
4042 * - for the write case, pad out with zero data to the device
4044 u16 pad_buf[1] = { 0 };
4045 unsigned int words = bytes >> 1;
4048 if (words) /* warning if bytes > 1 */
4049 ata_dev_printk(qc->dev, KERN_WARNING,
4050 "%u bytes trailing data\n", bytes);
4052 for (i = 0; i < words; i++)
4053 ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
4055 ap->hsm_task_state = HSM_ST_LAST;
4059 sg = &qc->__sg[qc->cursg];
4062 offset = sg->offset + qc->cursg_ofs;
4064 /* get the current page and offset */
4065 page = nth_page(page, (offset >> PAGE_SHIFT));
4066 offset %= PAGE_SIZE;
4068 /* don't overrun current sg */
4069 count = min(sg->length - qc->cursg_ofs, bytes);
4071 /* don't cross page boundaries */
4072 count = min(count, (unsigned int)PAGE_SIZE - offset);
4074 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4076 if (PageHighMem(page)) {
4077 unsigned long flags;
4079 /* FIXME: use bounce buffer */
4080 local_irq_save(flags);
4081 buf = kmap_atomic(page, KM_IRQ0);
4083 /* do the actual data transfer */
4084 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4086 kunmap_atomic(buf, KM_IRQ0);
4087 local_irq_restore(flags);
4089 buf = page_address(page);
4090 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4094 qc->curbytes += count;
4095 qc->cursg_ofs += count;
4097 if (qc->cursg_ofs == sg->length) {
4107 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4108 * @qc: Command on going
4110 * Transfer data from/to the ATAPI device.
4113 * Inherited from caller.
4116 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4118 struct ata_port *ap = qc->ap;
4119 struct ata_device *dev = qc->dev;
4120 unsigned int ireason, bc_lo, bc_hi, bytes;
4121 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4123 /* Abuse qc->result_tf for temp storage of intermediate TF
4124 * here to save some kernel stack usage.
4125 * For normal completion, qc->result_tf is not relevant. For
4126 * error, qc->result_tf is later overwritten by ata_qc_complete().
4127 * So, the correctness of qc->result_tf is not affected.
4129 ap->ops->tf_read(ap, &qc->result_tf);
4130 ireason = qc->result_tf.nsect;
4131 bc_lo = qc->result_tf.lbam;
4132 bc_hi = qc->result_tf.lbah;
4133 bytes = (bc_hi << 8) | bc_lo;
4135 /* shall be cleared to zero, indicating xfer of data */
4136 if (ireason & (1 << 0))
4139 /* make sure transfer direction matches expected */
4140 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4141 if (do_write != i_write)
4144 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
4146 __atapi_pio_bytes(qc, bytes);
4151 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4152 qc->err_mask |= AC_ERR_HSM;
4153 ap->hsm_task_state = HSM_ST_ERR;
4157 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4158 * @ap: the target ata_port
4162 * 1 if ok in workqueue, 0 otherwise.
4165 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4167 if (qc->tf.flags & ATA_TFLAG_POLLING)
4170 if (ap->hsm_task_state == HSM_ST_FIRST) {
4171 if (qc->tf.protocol == ATA_PROT_PIO &&
4172 (qc->tf.flags & ATA_TFLAG_WRITE))
4175 if (is_atapi_taskfile(&qc->tf) &&
4176 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4184 * ata_hsm_qc_complete - finish a qc running on standard HSM
4185 * @qc: Command to complete
4186 * @in_wq: 1 if called from workqueue, 0 otherwise
4188 * Finish @qc which is running on standard HSM.
4191 * If @in_wq is zero, spin_lock_irqsave(host lock).
4192 * Otherwise, none on entry and grabs host lock.
4194 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4196 struct ata_port *ap = qc->ap;
4197 unsigned long flags;
4199 if (ap->ops->error_handler) {
4201 spin_lock_irqsave(ap->lock, flags);
4203 /* EH might have kicked in while host lock is
4206 qc = ata_qc_from_tag(ap, qc->tag);
4208 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4209 ap->ops->irq_on(ap);
4210 ata_qc_complete(qc);
4212 ata_port_freeze(ap);
4215 spin_unlock_irqrestore(ap->lock, flags);
4217 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4218 ata_qc_complete(qc);
4220 ata_port_freeze(ap);
4224 spin_lock_irqsave(ap->lock, flags);
4225 ap->ops->irq_on(ap);
4226 ata_qc_complete(qc);
4227 spin_unlock_irqrestore(ap->lock, flags);
4229 ata_qc_complete(qc);
4232 ata_altstatus(ap); /* flush */
4236 * ata_hsm_move - move the HSM to the next state.
4237 * @ap: the target ata_port
4239 * @status: current device status
4240 * @in_wq: 1 if called from workqueue, 0 otherwise
4243 * 1 when poll next status needed, 0 otherwise.
4245 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4246 u8 status, int in_wq)
4248 unsigned long flags = 0;
4251 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4253 /* Make sure ata_qc_issue_prot() does not throw things
4254 * like DMA polling into the workqueue. Notice that
4255 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4257 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4260 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4261 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4263 switch (ap->hsm_task_state) {
4265 /* Send first data block or PACKET CDB */
4267 /* If polling, we will stay in the work queue after
4268 * sending the data. Otherwise, interrupt handler
4269 * takes over after sending the data.
4271 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4273 /* check device status */
4274 if (unlikely((status & ATA_DRQ) == 0)) {
4275 /* handle BSY=0, DRQ=0 as error */
4276 if (likely(status & (ATA_ERR | ATA_DF)))
4277 /* device stops HSM for abort/error */
4278 qc->err_mask |= AC_ERR_DEV;
4280 /* HSM violation. Let EH handle this */
4281 qc->err_mask |= AC_ERR_HSM;
4283 ap->hsm_task_state = HSM_ST_ERR;
4287 /* Device should not ask for data transfer (DRQ=1)
4288 * when it finds something wrong.
4289 * We ignore DRQ here and stop the HSM by
4290 * changing hsm_task_state to HSM_ST_ERR, letting
4291 * the EH abort the command or reset the device.
4293 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4294 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4296 qc->err_mask |= AC_ERR_HSM;
4297 ap->hsm_task_state = HSM_ST_ERR;
4301 /* Send the CDB (atapi) or the first data block (ata pio out).
4302 * During the state transition, interrupt handler shouldn't
4303 * be invoked before the data transfer is complete and
4304 * hsm_task_state is changed. Hence, the following locking.
4307 spin_lock_irqsave(ap->lock, flags);
4309 if (qc->tf.protocol == ATA_PROT_PIO) {
4310 /* PIO data out protocol.
4311 * send first data block.
4314 /* ata_pio_sectors() might change the state
4315 * to HSM_ST_LAST. so, the state is changed here
4316 * before ata_pio_sectors().
4318 ap->hsm_task_state = HSM_ST;
4319 ata_pio_sectors(qc);
4320 ata_altstatus(ap); /* flush */
4323 atapi_send_cdb(ap, qc);
4326 spin_unlock_irqrestore(ap->lock, flags);
4328 /* if polling, ata_pio_task() handles the rest.
4329 * otherwise, interrupt handler takes over from here.
4334 /* complete command or read/write the data register */
4335 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4336 /* ATAPI PIO protocol */
4337 if ((status & ATA_DRQ) == 0) {
4338 /* No more data to transfer or device error.
4339 * Device error will be tagged in HSM_ST_LAST.
4341 ap->hsm_task_state = HSM_ST_LAST;
4345 /* Device should not ask for data transfer (DRQ=1)
4346 * when it finds something wrong.
4347 * We ignore DRQ here and stop the HSM by
4348 * changing hsm_task_state to HSM_ST_ERR, letting
4349 * the EH abort the command or reset the device.
4351 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4352 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4354 qc->err_mask |= AC_ERR_HSM;
4355 ap->hsm_task_state = HSM_ST_ERR;
4359 atapi_pio_bytes(qc);
4361 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4362 /* bad ireason reported by device */
4366 /* ATA PIO protocol */
4367 if (unlikely((status & ATA_DRQ) == 0)) {
4368 /* handle BSY=0, DRQ=0 as error */
4369 if (likely(status & (ATA_ERR | ATA_DF)))
4370 /* device stops HSM for abort/error */
4371 qc->err_mask |= AC_ERR_DEV;
4373 /* HSM violation. Let EH handle this.
4374 * Phantom devices also trigger this
4375 * condition; mark the NODEV hint.
4377 qc->err_mask |= AC_ERR_HSM |
4380 ap->hsm_task_state = HSM_ST_ERR;
4384 /* For PIO reads, some devices may ask for
4385 * data transfer (DRQ=1) along with ERR=1.
4386 * We respect DRQ here and transfer one
4387 * block of junk data before changing the
4388 * hsm_task_state to HSM_ST_ERR.
4390 * For PIO writes, ERR=1 DRQ=1 doesn't make
4391 * sense since the data block has been
4392 * transferred to the device.
4394 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4395 /* data might be corrupted */
4396 qc->err_mask |= AC_ERR_DEV;
4398 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4399 ata_pio_sectors(qc);
4401 status = ata_wait_idle(ap);
4404 if (status & (ATA_BUSY | ATA_DRQ))
4405 qc->err_mask |= AC_ERR_HSM;
4407 /* ata_pio_sectors() might change the
4408 * state to HSM_ST_LAST. so, the state
4409 * is changed after ata_pio_sectors().
4411 ap->hsm_task_state = HSM_ST_ERR;
4415 ata_pio_sectors(qc);
4417 if (ap->hsm_task_state == HSM_ST_LAST &&
4418 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4421 status = ata_wait_idle(ap);
4426 ata_altstatus(ap); /* flush */
4431 if (unlikely(!ata_ok(status))) {
4432 qc->err_mask |= __ac_err_mask(status);
4433 ap->hsm_task_state = HSM_ST_ERR;
4437 /* no more data to transfer */
4438 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4439 ap->id, qc->dev->devno, status);
4441 WARN_ON(qc->err_mask);
4443 ap->hsm_task_state = HSM_ST_IDLE;
4445 /* complete taskfile transaction */
4446 ata_hsm_qc_complete(qc, in_wq);
4452 /* make sure qc->err_mask is available to
4453 * know what's wrong and recover
4455 WARN_ON(qc->err_mask == 0);
4457 ap->hsm_task_state = HSM_ST_IDLE;
4459 /* complete taskfile transaction */
4460 ata_hsm_qc_complete(qc, in_wq);
4472 static void ata_pio_task(struct work_struct *work)
4474 struct ata_port *ap =
4475 container_of(work, struct ata_port, port_task.work);
4476 struct ata_queued_cmd *qc = ap->port_task_data;
4481 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4484 * This is purely heuristic. This is a fast path.
4485 * Sometimes when we enter, BSY will be cleared in
4486 * a chk-status or two. If not, the drive is probably seeking
4487 * or something. Snooze for a couple msecs, then
4488 * chk-status again. If still busy, queue delayed work.
4490 status = ata_busy_wait(ap, ATA_BUSY, 5);
4491 if (status & ATA_BUSY) {
4493 status = ata_busy_wait(ap, ATA_BUSY, 10);
4494 if (status & ATA_BUSY) {
4495 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4501 poll_next = ata_hsm_move(ap, qc, status, 1);
4503 /* another command or interrupt handler
4504 * may be running at this point.
4511 * ata_qc_new - Request an available ATA command, for queueing
4512 * @ap: Port associated with device @dev
4513 * @dev: Device from whom we request an available command structure
4519 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4521 struct ata_queued_cmd *qc = NULL;
4524 /* no command while frozen */
4525 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4528 /* the last tag is reserved for internal command. */
4529 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4530 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4531 qc = __ata_qc_from_tag(ap, i);
4542 * ata_qc_new_init - Request an available ATA command, and initialize it
4543 * @dev: Device from whom we request an available command structure
4549 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4551 struct ata_port *ap = dev->ap;
4552 struct ata_queued_cmd *qc;
4554 qc = ata_qc_new(ap);
4567 * ata_qc_free - free unused ata_queued_cmd
4568 * @qc: Command to complete
4570 * Designed to free unused ata_queued_cmd object
4571 * in case something prevents using it.
4574 * spin_lock_irqsave(host lock)
4576 void ata_qc_free(struct ata_queued_cmd *qc)
4578 struct ata_port *ap = qc->ap;
4581 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4585 if (likely(ata_tag_valid(tag))) {
4586 qc->tag = ATA_TAG_POISON;
4587 clear_bit(tag, &ap->qc_allocated);
4591 void __ata_qc_complete(struct ata_queued_cmd *qc)
4593 struct ata_port *ap = qc->ap;
4595 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4596 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4598 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4601 /* command should be marked inactive atomically with qc completion */
4602 if (qc->tf.protocol == ATA_PROT_NCQ)
4603 ap->sactive &= ~(1 << qc->tag);
4605 ap->active_tag = ATA_TAG_POISON;
4607 /* atapi: mark qc as inactive to prevent the interrupt handler
4608 * from completing the command twice later, before the error handler
4609 * is called. (when rc != 0 and atapi request sense is needed)
4611 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4612 ap->qc_active &= ~(1 << qc->tag);
4614 /* call completion callback */
4615 qc->complete_fn(qc);
4618 static void fill_result_tf(struct ata_queued_cmd *qc)
4620 struct ata_port *ap = qc->ap;
4622 ap->ops->tf_read(ap, &qc->result_tf);
4623 qc->result_tf.flags = qc->tf.flags;
4627 * ata_qc_complete - Complete an active ATA command
4628 * @qc: Command to complete
4631 * Indicate to the mid and upper layers that an ATA
4632 * command has completed, with either an ok or not-ok status.
4635 * spin_lock_irqsave(host lock)
4637 void ata_qc_complete(struct ata_queued_cmd *qc)
4639 struct ata_port *ap = qc->ap;
4641 /* XXX: New EH and old EH use different mechanisms to
4642 * synchronize EH with regular execution path.
4644 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4645 * Normal execution path is responsible for not accessing a
4646 * failed qc. libata core enforces the rule by returning NULL
4647 * from ata_qc_from_tag() for failed qcs.
4649 * Old EH depends on ata_qc_complete() nullifying completion
4650 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4651 * not synchronize with interrupt handler. Only PIO task is
4654 if (ap->ops->error_handler) {
4655 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4657 if (unlikely(qc->err_mask))
4658 qc->flags |= ATA_QCFLAG_FAILED;
4660 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4661 if (!ata_tag_internal(qc->tag)) {
4662 /* always fill result TF for failed qc */
4664 ata_qc_schedule_eh(qc);
4669 /* read result TF if requested */
4670 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4673 __ata_qc_complete(qc);
4675 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4678 /* read result TF if failed or requested */
4679 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4682 __ata_qc_complete(qc);
4687 * ata_qc_complete_multiple - Complete multiple qcs successfully
4688 * @ap: port in question
4689 * @qc_active: new qc_active mask
4690 * @finish_qc: LLDD callback invoked before completing a qc
4692 * Complete in-flight commands. This function is meant to be
4693 * called from the low-level driver's interrupt routine to complete
4694 * requests normally. ap->qc_active and @qc_active are compared
4695 * and commands are completed accordingly.
4698 * spin_lock_irqsave(host lock)
4701 * Number of completed commands on success, -errno otherwise.
4703 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4704 void (*finish_qc)(struct ata_queued_cmd *))
4710 done_mask = ap->qc_active ^ qc_active;
4712 if (unlikely(done_mask & qc_active)) {
4713 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4714 "(%08x->%08x)\n", ap->qc_active, qc_active);
4718 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4719 struct ata_queued_cmd *qc;
4721 if (!(done_mask & (1 << i)))
4724 if ((qc = ata_qc_from_tag(ap, i))) {
4727 ata_qc_complete(qc);
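/*
 * Sketch of the intended caller: an NCQ-aware LLDD's interrupt
 * handler reads the hardware's view of the active tags and hands the
 * new mask straight in.  The register read is hypothetical; error
 * handling is reduced to freezing the port for EH.
 */
static void example_ncq_irq(struct ata_port *ap, u32 hw_active_tags)
{
	if (ata_qc_complete_multiple(ap, hw_active_tags, NULL) < 0)
		ata_port_freeze(ap);	/* illegal transition, punt to EH */
}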
4735 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4737 struct ata_port *ap = qc->ap;
4739 switch (qc->tf.protocol) {
4742 case ATA_PROT_ATAPI_DMA:
4745 case ATA_PROT_ATAPI:
4747 if (ap->flags & ATA_FLAG_PIO_DMA)
4760 * ata_qc_issue - issue taskfile to device
4761 * @qc: command to issue to device
4763 * Prepare an ATA command for submission to the device.
4764 * This includes mapping the data into a DMA-able
4765 * area, filling in the S/G table, and finally
4766 * writing the taskfile to hardware, starting the command.
4769 * spin_lock_irqsave(host lock)
4771 void ata_qc_issue(struct ata_queued_cmd *qc)
4773 struct ata_port *ap = qc->ap;
4775 /* Make sure only one non-NCQ command is outstanding. The
4776 * check is skipped for old EH because it reuses active qc to
4777 * request ATAPI sense.
4779 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4781 if (qc->tf.protocol == ATA_PROT_NCQ) {
4782 WARN_ON(ap->sactive & (1 << qc->tag));
4783 ap->sactive |= 1 << qc->tag;
4785 WARN_ON(ap->sactive);
4786 ap->active_tag = qc->tag;
4789 qc->flags |= ATA_QCFLAG_ACTIVE;
4790 ap->qc_active |= 1 << qc->tag;
4792 if (ata_should_dma_map(qc)) {
4793 if (qc->flags & ATA_QCFLAG_SG) {
4794 if (ata_sg_setup(qc))
4796 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4797 if (ata_sg_setup_one(qc))
4801 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4804 ap->ops->qc_prep(qc);
4806 qc->err_mask |= ap->ops->qc_issue(qc);
4807 if (unlikely(qc->err_mask))
4812 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4813 qc->err_mask |= AC_ERR_SYSTEM;
4815 ata_qc_complete(qc);
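/*
 * Lifecycle sketch tying the qc helpers together: allocate a command,
 * attach a single buffer, fill in a minimal taskfile and fire it.
 * The caller must hold the host lock; the taskfile flags, addressing
 * fields and completion callback are elided, and the opcode choice is
 * just for illustration.
 */
static void example_issue_one(struct ata_device *dev, void *buf,
			      unsigned int buflen)
{
	struct ata_queued_cmd *qc = ata_qc_new_init(dev);

	if (!qc)
		return;
	ata_sg_init_one(qc, buf, buflen);
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->tf.protocol = ATA_PROT_PIO;
	qc->tf.command = ATA_CMD_PIO_READ;	/* READ SECTOR(S) */
	ata_qc_issue(qc);
}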
4819 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4820 * @qc: command to issue to device
4822 * Using various libata functions and hooks, this function
4823 * starts an ATA command. ATA commands are grouped into
4824 * classes called "protocols", and issuing each type of protocol
4825 * is slightly different.
4827 * May be used as the qc_issue() entry in ata_port_operations.
4830 * spin_lock_irqsave(host lock)
4833 * Zero on success, AC_ERR_* mask on failure
4836 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4838 struct ata_port *ap = qc->ap;
4840 /* Use polling pio if the LLD doesn't handle
4841 * interrupt driven pio and atapi CDB interrupt.
4843 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4844 switch (qc->tf.protocol) {
4846 case ATA_PROT_NODATA:
4847 case ATA_PROT_ATAPI:
4848 case ATA_PROT_ATAPI_NODATA:
4849 qc->tf.flags |= ATA_TFLAG_POLLING;
4851 case ATA_PROT_ATAPI_DMA:
4852 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4853 /* see ata_dma_blacklisted() */
4861 /* Some controllers show flaky interrupt behavior after
4862 * setting xfer mode. Use polling instead.
4864 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4865 qc->tf.feature == SETFEATURES_XFER) &&
4866 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4867 qc->tf.flags |= ATA_TFLAG_POLLING;
4869 /* select the device */
4870 ata_dev_select(ap, qc->dev->devno, 1, 0);
4872 /* start the command */
4873 switch (qc->tf.protocol) {
4874 case ATA_PROT_NODATA:
4875 if (qc->tf.flags & ATA_TFLAG_POLLING)
4876 ata_qc_set_polling(qc);
4878 ata_tf_to_host(ap, &qc->tf);
4879 ap->hsm_task_state = HSM_ST_LAST;
4881 if (qc->tf.flags & ATA_TFLAG_POLLING)
4882 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4887 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4889 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4890 ap->ops->bmdma_setup(qc); /* set up bmdma */
4891 ap->ops->bmdma_start(qc); /* initiate bmdma */
4892 ap->hsm_task_state = HSM_ST_LAST;
4896 if (qc->tf.flags & ATA_TFLAG_POLLING)
4897 ata_qc_set_polling(qc);
4899 ata_tf_to_host(ap, &qc->tf);
4901 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4902 /* PIO data out protocol */
4903 ap->hsm_task_state = HSM_ST_FIRST;
4904 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4906 /* always send first data block using
4907 * the ata_pio_task() codepath.
4910 /* PIO data in protocol */
4911 ap->hsm_task_state = HSM_ST;
4913 if (qc->tf.flags & ATA_TFLAG_POLLING)
4914 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4916 /* if polling, ata_pio_task() handles the rest.
4917 * otherwise, interrupt handler takes over from here.
4923 case ATA_PROT_ATAPI:
4924 case ATA_PROT_ATAPI_NODATA:
4925 if (qc->tf.flags & ATA_TFLAG_POLLING)
4926 ata_qc_set_polling(qc);
4928 ata_tf_to_host(ap, &qc->tf);
4930 ap->hsm_task_state = HSM_ST_FIRST;
4932 /* send cdb by polling if no cdb interrupt */
4933 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4934 (qc->tf.flags & ATA_TFLAG_POLLING))
4935 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4938 case ATA_PROT_ATAPI_DMA:
4939 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4941 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4942 ap->ops->bmdma_setup(qc); /* set up bmdma */
4943 ap->hsm_task_state = HSM_ST_FIRST;
4945 /* send cdb by polling if no cdb interrupt */
4946 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4947 ata_port_queue_task(ap, ata_pio_task, qc, 0);
4952 return AC_ERR_SYSTEM;
4959 * ata_host_intr - Handle host interrupt for given (port, task)
4960 * @ap: Port on which interrupt arrived (possibly...)
4961 * @qc: Taskfile currently active in engine
4963 * Handle host interrupt for given queued command. Currently,
4964 * only DMA interrupts are handled. All other commands are
4965 * handled via polling with interrupts disabled (nIEN bit).
4968 * spin_lock_irqsave(host lock)
4971 * One if interrupt was handled, zero if not (shared irq).
4974 inline unsigned int ata_host_intr (struct ata_port *ap,
4975 struct ata_queued_cmd *qc)
4977 struct ata_eh_info *ehi = &ap->eh_info;
4978 u8 status, host_stat = 0;
4980 VPRINTK("ata%u: protocol %d task_state %d\n",
4981 ap->id, qc->tf.protocol, ap->hsm_task_state);
4983 /* Check whether we are expecting interrupt in this state */
4984 switch (ap->hsm_task_state) {
4986 /* Some pre-ATAPI-4 devices assert INTRQ
4987 * in this state when ready to receive the CDB.
4990 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4991 * The flag is set only for ATAPI devices.
4992 * No need to check is_atapi_taskfile(&qc->tf) again.
4994 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4998 if (qc->tf.protocol == ATA_PROT_DMA ||
4999 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5000 /* check status of DMA engine */
5001 host_stat = ap->ops->bmdma_status(ap);
5002 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
5004 /* if it's not our irq... */
5005 if (!(host_stat & ATA_DMA_INTR))
5008 /* before we do anything else, clear DMA-Start bit */
5009 ap->ops->bmdma_stop(qc);
5011 if (unlikely(host_stat & ATA_DMA_ERR)) {
5012 /* error when transferring data to/from memory */
5013 qc->err_mask |= AC_ERR_HOST_BUS;
5014 ap->hsm_task_state = HSM_ST_ERR;
5024 /* check altstatus */
5025 status = ata_altstatus(ap);
5026 if (status & ATA_BUSY)
5029 /* check main status, clearing INTRQ */
5030 status = ata_chk_status(ap);
5031 if (unlikely(status & ATA_BUSY))
5034 /* ack bmdma irq events */
5035 ap->ops->irq_clear(ap);
5037 ata_hsm_move(ap, qc, status, 0);
5039 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5040 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5041 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5043 return 1; /* irq handled */
5046 ap->stats.idle_irq++;
5049 if ((ap->stats.idle_irq % 1000) == 0) {
5050 ap->ops->irq_ack(ap, 0); /* debug trap */
5051 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5055 return 0; /* irq not handled */
5059 * ata_interrupt - Default ATA host interrupt handler
5060 * @irq: irq line (unused)
5061 * @dev_instance: pointer to our ata_host information structure
5063 * Default interrupt handler for PCI IDE devices. Calls
5064 * ata_host_intr() for each port that is not disabled.
5067 * Obtains host lock during operation.
5070 * IRQ_NONE or IRQ_HANDLED.
5073 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5075 struct ata_host *host = dev_instance;
5077 unsigned int handled = 0;
5078 unsigned long flags;
5080 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5081 spin_lock_irqsave(&host->lock, flags);
5083 for (i = 0; i < host->n_ports; i++) {
5084 struct ata_port *ap;
5086 ap = host->ports[i];
5088 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
5089 struct ata_queued_cmd *qc;
5091 qc = ata_qc_from_tag(ap, ap->active_tag);
5092 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5093 (qc->flags & ATA_QCFLAG_ACTIVE))
5094 handled |= ata_host_intr(ap, qc);
5098 spin_unlock_irqrestore(&host->lock, flags);
5100 return IRQ_RETVAL(handled);
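/* Usage sketch (editor's illustration): an LLD with no special
 * interrupt needs points its irq_handler hook at ata_interrupt();
 * ata_device_add() later hands that hook to devm_request_irq().
 * The "foo" name is hypothetical:
 *
 *	static const struct ata_port_operations foo_ops = {
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		...
 *	};
 */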
5104 * sata_scr_valid - test whether SCRs are accessible
5105 * @ap: ATA port to test SCR accessibility for
5107 * Test whether SCRs are accessible for @ap.
5113 * 1 if SCRs are accessible, 0 otherwise.
5115 int sata_scr_valid(struct ata_port *ap)
5117 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5121 * sata_scr_read - read SCR register of the specified port
5122 * @ap: ATA port to read SCR for
5123 * @reg: SCR to read
5124 * @val: Place to store read value
5126 * Read SCR register @reg of @ap into *@val. This function is
5127 * guaranteed to succeed if the cable type of the port is SATA
5128 * and the port implements ->scr_read.
5134 * 0 on success, negative errno on failure.
5136 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5138 if (sata_scr_valid(ap)) {
5139 *val = ap->ops->scr_read(ap, reg);
5146 * sata_scr_write - write SCR register of the specified port
5147 * @ap: ATA port to write SCR for
5148 * @reg: SCR to write
5149 * @val: value to write
5151 * Write @val to SCR register @reg of @ap. This function is
5152 * guaranteed to succeed if the cable type of the port is SATA
5153 * and the port implements ->scr_write.
5159 * 0 on success, negative errno on failure.
5161 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5163 if (sata_scr_valid(ap)) {
5164 ap->ops->scr_write(ap, reg, val);
5171 * sata_scr_write_flush - write SCR register of the specified port and flush
5172 * @ap: ATA port to write SCR for
5173 * @reg: SCR to write
5174 * @val: value to write
5176 * This function is identical to sata_scr_write() except that this
5177 * function performs flush after writing to the register.
5183 * 0 on success, negative errno on failure.
5185 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5187 if (sata_scr_valid(ap)) {
5188 ap->ops->scr_write(ap, reg, val);
5189 ap->ops->scr_read(ap, reg);
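/* Usage sketch (editor's illustration): the SCR helpers above compose
 * into a COMRESET sequence, similar in spirit to sata_port_hardreset():
 *
 *	u32 scontrol;
 *	int rc;
 *
 *	rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
 *	if (rc)
 *		return rc;
 *	scontrol = (scontrol & 0x0f0) | 0x301;
 *	rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
 *
 * where DET=1 in SControl requests COMRESET on the phy.
 */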
5196 * ata_port_online - test whether the given port is online
5197 * @ap: ATA port to test
5199 * Test whether @ap is online. Note that this function returns 0
5200 * if online status of @ap cannot be obtained, so
5201 * ata_port_online(ap) != !ata_port_offline(ap).
5207 * 1 if the port online status is available and online.
5209 int ata_port_online(struct ata_port *ap)
5213 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5219 * ata_port_offline - test whether the given port is offline
5220 * @ap: ATA port to test
5222 * Test whether @ap is offline. Note that this function returns
5223 * 0 if offline status of @ap cannot be obtained, so
5224 * ata_port_online(ap) != !ata_port_offline(ap).
5230 * 1 if the port offline status is available and offline.
5232 int ata_port_offline(struct ata_port *ap)
5236 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
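/* Editor's note: the two tests above are deliberately asymmetric.  On a
 * port whose SCRs cannot be read (e.g. PATA), both return 0, so callers
 * should branch only on the answer they can trust, e.g.:
 *
 *	if (ata_port_offline(ap))
 *		return -ENODEV;
 */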
5241 int ata_flush_cache(struct ata_device *dev)
5243 unsigned int err_mask;
5246 if (!ata_try_flush_cache(dev))
5249 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5250 cmd = ATA_CMD_FLUSH_EXT;
5252 cmd = ATA_CMD_FLUSH;
5254 err_mask = ata_do_simple_cmd(dev, cmd);
5256 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5263 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5264 unsigned int action, unsigned int ehi_flags, int wait)
5267 unsigned long flags;
5270 for (i = 0; i < host->n_ports; i++) {
5271 struct ata_port *ap = host->ports[i];
5273 /* Previous resume operation might still be in
5274 * progress. Wait for PM_PENDING to clear.
5276 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5277 ata_port_wait_eh(ap);
5278 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5281 /* request PM ops to EH */
5282 spin_lock_irqsave(ap->lock, flags);
5287 ap->pm_result = &rc;
5290 ap->pflags |= ATA_PFLAG_PM_PENDING;
5291 ap->eh_info.action |= action;
5292 ap->eh_info.flags |= ehi_flags;
5294 ata_port_schedule_eh(ap);
5296 spin_unlock_irqrestore(ap->lock, flags);
5298 /* wait and check result */
5300 ata_port_wait_eh(ap);
5301 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5311 * ata_host_suspend - suspend host
5312 * @host: host to suspend
5313 * @mesg: PM message
5315 * Suspend @host. Actual operation is performed by EH. This
5316 * function requests EH to perform PM operations and waits for EH to finish.
5320 * Kernel thread context (may sleep).
5323 * 0 on success, -errno on failure.
5325 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5329 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5333 /* EH is quiescent now. Fail if we have any ready device.
5334 * This happens if hotplug occurs between completion of device
5335 * suspension and here.
5337 for (i = 0; i < host->n_ports; i++) {
5338 struct ata_port *ap = host->ports[i];
5340 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5341 struct ata_device *dev = &ap->device[j];
5343 if (ata_dev_ready(dev)) {
5344 ata_port_printk(ap, KERN_WARNING,
5345 "suspend failed, device %d "
5346 "still active\n", dev->devno);
5353 host->dev->power.power_state = mesg;
5357 ata_host_resume(host);
5362 * ata_host_resume - resume host
5363 * @host: host to resume
5365 * Resume @host. Actual operation is performed by EH. This
5366 * function requests EH to perform PM operations and returns.
5367 * Note that all resume operations are performed in parallel.
5370 * Kernel thread context (may sleep).
5372 void ata_host_resume(struct ata_host *host)
5374 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5375 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5376 host->dev->power.power_state = PMSG_ON;
5380 * ata_port_start - Set port up for DMA.
5381 * @ap: Port to initialize
5383 * Called just after data structures for each port are
5384 * initialized. Allocates space for the PRD table.
5386 * May be used as the port_start() entry in ata_port_operations.
5389 * Inherited from caller.
5391 int ata_port_start(struct ata_port *ap)
5393 struct device *dev = ap->dev;
5396 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5401 rc = ata_pad_alloc(ap, dev);
5405 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5406 (unsigned long long)ap->prd_dma);
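/* Usage sketch (editor's illustration, hypothetical "foo" driver): an
 * LLD that needs extra per-port state can layer its own port_start on
 * top of this helper:
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_port_start(ap);
 *
 *		if (rc)
 *			return rc;
 *		ap->private_data = devm_kzalloc(ap->dev,
 *				sizeof(struct foo_priv), GFP_KERNEL);
 *		return ap->private_data ? 0 : -ENOMEM;
 *	}
 */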
5411 * ata_dev_init - Initialize an ata_device structure
5412 * @dev: Device structure to initialize
5414 * Initialize @dev in preparation for probing.
5417 * Inherited from caller.
5419 void ata_dev_init(struct ata_device *dev)
5421 struct ata_port *ap = dev->ap;
5422 unsigned long flags;
5424 /* SATA spd limit is bound to the first device */
5425 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5427 /* High bits of dev->flags are used to record warm plug
5428 * requests, which occur asynchronously. Synchronize using the host lock.
5431 spin_lock_irqsave(ap->lock, flags);
5432 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5433 spin_unlock_irqrestore(ap->lock, flags);
5435 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5436 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5437 dev->pio_mask = UINT_MAX;
5438 dev->mwdma_mask = UINT_MAX;
5439 dev->udma_mask = UINT_MAX;
5443 * ata_port_init - Initialize an ata_port structure
5444 * @ap: Structure to initialize
5445 * @host: Collection of hosts to which @ap belongs
5446 * @ent: Probe information provided by low-level driver
5447 * @port_no: Port number associated with this ata_port
5449 * Initialize a new ata_port structure.
5452 * Inherited from caller.
5454 void ata_port_init(struct ata_port *ap, struct ata_host *host,
5455 const struct ata_probe_ent *ent, unsigned int port_no)
5459 ap->lock = &host->lock;
5460 ap->flags = ATA_FLAG_DISABLED;
5461 ap->id = ata_unique_id++;
5462 ap->ctl = ATA_DEVCTL_OBS;
5465 ap->port_no = port_no;
5466 if (port_no == 1 && ent->pinfo2) {
5467 ap->pio_mask = ent->pinfo2->pio_mask;
5468 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5469 ap->udma_mask = ent->pinfo2->udma_mask;
5470 ap->flags |= ent->pinfo2->flags;
5471 ap->ops = ent->pinfo2->port_ops;
5473 ap->pio_mask = ent->pio_mask;
5474 ap->mwdma_mask = ent->mwdma_mask;
5475 ap->udma_mask = ent->udma_mask;
5476 ap->flags |= ent->port_flags;
5477 ap->ops = ent->port_ops;
5479 ap->hw_sata_spd_limit = UINT_MAX;
5480 ap->active_tag = ATA_TAG_POISON;
5481 ap->last_ctl = 0xFF;
5483 #if defined(ATA_VERBOSE_DEBUG)
5484 /* turn on all debugging levels */
5485 ap->msg_enable = 0x00FF;
5486 #elif defined(ATA_DEBUG)
5487 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5489 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5492 INIT_DELAYED_WORK(&ap->port_task, NULL);
5493 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5494 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5495 INIT_LIST_HEAD(&ap->eh_done_q);
5496 init_waitqueue_head(&ap->eh_wait_q);
5498 /* set cable type */
5499 ap->cbl = ATA_CBL_NONE;
5500 if (ap->flags & ATA_FLAG_SATA)
5501 ap->cbl = ATA_CBL_SATA;
5503 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5504 struct ata_device *dev = &ap->device[i];
5511 ap->stats.unhandled_irq = 1;
5512 ap->stats.idle_irq = 1;
5515 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5519 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5520 * @ap: ATA port to initialize SCSI host for
5521 * @shost: SCSI host associated with @ap
5523 * Initialize SCSI host @shost associated with ATA port @ap.
5526 * Inherited from caller.
5528 static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5530 ap->scsi_host = shost;
5532 shost->unique_id = ap->id;
5535 shost->max_channel = 1;
5536 shost->max_cmd_len = 12;
5540 * ata_port_add - Attach low-level ATA driver to system
5541 * @ent: Information provided by low-level driver
5542 * @host: Collections of ports to which we add
5543 * @port_no: Port number associated with this host
5545 * Attach low-level ATA driver to system.
5548 * PCI/etc. bus probe sem.
5551 * New ata_port on success, NULL on error.
5553 static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5554 struct ata_host *host,
5555 unsigned int port_no)
5557 struct Scsi_Host *shost;
5558 struct ata_port *ap;
5562 if (!ent->port_ops->error_handler &&
5563 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5564 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5569 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5573 shost->transportt = &ata_scsi_transport_template;
5575 ap = ata_shost_to_port(shost);
5577 ata_port_init(ap, host, ent, port_no);
5578 ata_port_init_shost(ap, shost);
5583 static void ata_host_release(struct device *gendev, void *res)
5585 struct ata_host *host = dev_get_drvdata(gendev);
5588 for (i = 0; i < host->n_ports; i++) {
5589 struct ata_port *ap = host->ports[i];
5594 if (ap->ops->port_stop)
5595 ap->ops->port_stop(ap);
5597 scsi_host_put(ap->scsi_host);
5600 if (host->ops->host_stop)
5601 host->ops->host_stop(host);
5605 * ata_host_init - Initialize a host struct
5606 * @host: host to initialize
5607 * @dev: device host is attached to
5608 * @flags: host flags
5609 * @ops: port_ops
5612 * PCI/etc. bus probe sem.
5616 void ata_host_init(struct ata_host *host, struct device *dev,
5617 unsigned long flags, const struct ata_port_operations *ops)
5619 spin_lock_init(&host->lock);
5621 host->flags = flags;
5626 * ata_device_add - Register hardware device with ATA and SCSI layers
5627 * @ent: Probe information describing hardware device to be registered
5629 * This function processes the information provided in the probe
5630 * information struct @ent, allocates the necessary ATA and SCSI
5631 * host information structures, initializes them, and registers
5632 * everything with requisite kernel subsystems.
5634 * This function requests IRQs, probes the ATA bus, and probes the SCSI bus.
5638 * PCI/etc. bus probe sem.
5641 * Number of ports registered. Zero on error (no ports registered).
5643 int ata_device_add(const struct ata_probe_ent *ent)
5646 struct device *dev = ent->dev;
5647 struct ata_host *host;
5652 if (ent->irq == 0) {
5653 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5657 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5660 /* alloc a container for our list of ATA ports (buses) */
5661 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5662 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5665 devres_add(dev, host);
5666 dev_set_drvdata(dev, host);
5668 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5669 host->n_ports = ent->n_ports;
5670 host->irq = ent->irq;
5671 host->irq2 = ent->irq2;
5672 host->iomap = ent->iomap;
5673 host->private_data = ent->private_data;
5675 /* register each port bound to this device */
5676 for (i = 0; i < host->n_ports; i++) {
5677 struct ata_port *ap;
5678 unsigned long xfer_mode_mask;
5679 int irq_line = ent->irq;
5681 ap = ata_port_add(ent, host, i);
5682 host->ports[i] = ap;
5687 if (ent->dummy_port_mask & (1 << i)) {
5688 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5689 ap->ops = &ata_dummy_port_ops;
5694 rc = ap->ops->port_start(ap);
5696 host->ports[i] = NULL;
5697 scsi_host_put(ap->scsi_host);
5701 /* Report the secondary IRQ for second channel legacy */
5702 if (i == 1 && ent->irq2)
5703 irq_line = ent->irq2;
5705 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5706 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5707 (ap->pio_mask << ATA_SHIFT_PIO);
5709 /* print per-port info to dmesg */
5710 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5711 "ctl 0x%p bmdma 0x%p irq %d\n",
5712 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5713 ata_mode_string(xfer_mode_mask),
5714 ap->ioaddr.cmd_addr,
5715 ap->ioaddr.ctl_addr,
5716 ap->ioaddr.bmdma_addr,
5719 /* freeze port before requesting IRQ */
5720 ata_eh_freeze_port(ap);
5723 /* obtain irq, that may be shared between channels */
5724 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5725 ent->irq_flags, DRV_NAME, host);
5727 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5732 /* do we have a second IRQ for the other channel, eg legacy mode */
5734 /* We will get weird core code crashes later if this is true, so trap it now. */
5736 BUG_ON(ent->irq == ent->irq2);
5738 rc = devm_request_irq(dev, ent->irq2,
5739 ent->port_ops->irq_handler, ent->irq_flags,
5742 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5748 /* resource acquisition complete */
5749 devres_remove_group(dev, ata_device_add);
5751 /* perform each probe synchronously */
5752 DPRINTK("probe begin\n");
5753 for (i = 0; i < host->n_ports; i++) {
5754 struct ata_port *ap = host->ports[i];
5758 /* init sata_spd_limit to the current value */
5759 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5760 int spd = (scontrol >> 4) & 0xf;
5761 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5763 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5765 rc = scsi_add_host(ap->scsi_host, dev);
5767 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
5768 /* FIXME: do something useful here */
5769 /* FIXME: handle unconditional calls to
5770 * scsi_scan_host and ata_host_remove, below,
5775 if (ap->ops->error_handler) {
5776 struct ata_eh_info *ehi = &ap->eh_info;
5777 unsigned long flags;
5781 /* kick EH for boot probing */
5782 spin_lock_irqsave(ap->lock, flags);
5784 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5785 ehi->action |= ATA_EH_SOFTRESET;
5786 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5788 ap->pflags |= ATA_PFLAG_LOADING;
5789 ata_port_schedule_eh(ap);
5791 spin_unlock_irqrestore(ap->lock, flags);
5793 /* wait for EH to finish */
5794 ata_port_wait_eh(ap);
5796 DPRINTK("ata%u: bus probe begin\n", ap->id);
5797 rc = ata_bus_probe(ap);
5798 DPRINTK("ata%u: bus probe end\n", ap->id);
5801 /* FIXME: do something useful here?
5802 * Current libata behavior will
5803 * tear down everything when
5804 * the module is removed
5805 * or the h/w is unplugged.
5811 /* probes are done, now scan each port's disk(s) */
5812 DPRINTK("host probe begin\n");
5813 for (i = 0; i < host->n_ports; i++) {
5814 struct ata_port *ap = host->ports[i];
5816 ata_scsi_scan_host(ap);
5819 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5820 return ent->n_ports; /* success */
5823 devres_release_group(dev, ata_device_add);
5824 dev_set_drvdata(dev, NULL);
5825 VPRINTK("EXIT, returning %d\n", rc);
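/* Usage sketch (editor's illustration, hypothetical "foo" PCI driver):
 * the classic probe path that ends up here.  For PCI devices,
 * ata_pci_init_one() builds the probe entry and calls ata_device_add()
 * on the driver's behalf (pio_mask 0x1f selects PIO0-4, udma_mask 0x7f
 * selects UDMA0-6):
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		static struct ata_port_info info = {
 *			.sht		= &foo_sht,
 *			.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
 *			.pio_mask	= 0x1f,
 *			.udma_mask	= 0x7f,
 *			.port_ops	= &foo_ops,
 *		};
 *		struct ata_port_info *ppi[] = { &info, &info };
 *
 *		return ata_pci_init_one(pdev, ppi, 2);
 *	}
 */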
5830 * ata_port_detach - Detach ATA port in preparation for device removal
5831 * @ap: ATA port to be detached
5833 * Detach all ATA devices and the associated SCSI devices of @ap;
5834 * then, remove the associated SCSI host. @ap is guaranteed to
5835 * be quiescent on return from this function.
5838 * Kernel thread context (may sleep).
5840 void ata_port_detach(struct ata_port *ap)
5842 unsigned long flags;
5845 if (!ap->ops->error_handler)
5848 /* tell EH we're leaving & flush EH */
5849 spin_lock_irqsave(ap->lock, flags);
5850 ap->pflags |= ATA_PFLAG_UNLOADING;
5851 spin_unlock_irqrestore(ap->lock, flags);
5853 ata_port_wait_eh(ap);
5855 /* EH is now guaranteed to see UNLOADING, so no new device
5856 * will be attached. Disable all existing devices.
5858 spin_lock_irqsave(ap->lock, flags);
5860 for (i = 0; i < ATA_MAX_DEVICES; i++)
5861 ata_dev_disable(&ap->device[i]);
5863 spin_unlock_irqrestore(ap->lock, flags);
5865 /* Final freeze & EH. All in-flight commands are aborted. EH
5866 * will be skipped and retries will be terminated with a bad target.
5869 spin_lock_irqsave(ap->lock, flags);
5870 ata_port_freeze(ap); /* won't be thawed */
5871 spin_unlock_irqrestore(ap->lock, flags);
5873 ata_port_wait_eh(ap);
5875 /* Flush hotplug task. The sequence is similar to
5876 * ata_port_flush_task().
5878 flush_workqueue(ata_aux_wq);
5879 cancel_delayed_work(&ap->hotplug_task);
5880 flush_workqueue(ata_aux_wq);
5883 /* remove the associated SCSI host */
5884 scsi_remove_host(ap->scsi_host);
5888 * ata_host_detach - Detach all ports of an ATA host
5889 * @host: Host to detach
5891 * Detach all ports of @host.
5894 * Kernel thread context (may sleep).
5896 void ata_host_detach(struct ata_host *host)
5900 for (i = 0; i < host->n_ports; i++)
5901 ata_port_detach(host->ports[i]);
5904 struct ata_probe_ent *
5905 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5907 struct ata_probe_ent *probe_ent;
5909 /* XXX - the following if can go away once all LLDs are managed */
5910 if (!list_empty(&dev->devres_head))
5911 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
5913 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5915 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5916 kobject_name(&(dev->kobj)));
5920 INIT_LIST_HEAD(&probe_ent->node);
5921 probe_ent->dev = dev;
5923 probe_ent->sht = port->sht;
5924 probe_ent->port_flags = port->flags;
5925 probe_ent->pio_mask = port->pio_mask;
5926 probe_ent->mwdma_mask = port->mwdma_mask;
5927 probe_ent->udma_mask = port->udma_mask;
5928 probe_ent->port_ops = port->port_ops;
5929 probe_ent->private_data = port->private_data;
5935 * ata_std_ports - initialize ioaddr with standard port offsets.
5936 * @ioaddr: IO address structure to be initialized
5938 * Utility function which initializes data_addr, error_addr,
5939 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5940 * device_addr, status_addr, and command_addr to standard offsets
5941 * relative to cmd_addr.
5943 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5946 void ata_std_ports(struct ata_ioports *ioaddr)
5948 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5949 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5950 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5951 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5952 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5953 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5954 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5955 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5956 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5957 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
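/* Usage sketch (editor's illustration): an LLD fills in cmd_addr and
 * ctl_addr from its iomapped resources and lets ata_std_ports() derive
 * the remaining taskfile register addresses.  Offsets are hypothetical:
 *
 *	probe_ent->port[0].cmd_addr = iomap + FOO_CMD_OFFSET;
 *	probe_ent->port[0].altstatus_addr =
 *	probe_ent->port[0].ctl_addr = iomap + FOO_CTL_OFFSET;
 *	ata_std_ports(&probe_ent->port[0]);
 */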
5964 * ata_pci_remove_one - PCI layer callback for device removal
5965 * @pdev: PCI device that was removed
5967 * PCI layer indicates to libata via this hook that a hot-unplug or
5968 * module unload event has occurred. Detach all ports. Resource
5969 * release is handled via devres.
5972 * Inherited from PCI layer (may sleep).
5974 void ata_pci_remove_one(struct pci_dev *pdev)
5976 struct device *dev = pci_dev_to_dev(pdev);
5977 struct ata_host *host = dev_get_drvdata(dev);
5979 ata_host_detach(host);
5982 /* move to PCI subsystem */
5983 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5985 unsigned long tmp = 0;
5987 switch (bits->width) {
5990 pci_read_config_byte(pdev, bits->reg, &tmp8);
5996 pci_read_config_word(pdev, bits->reg, &tmp16);
6002 pci_read_config_dword(pdev, bits->reg, &tmp32);
6013 return (tmp == bits->val) ? 1 : 0;
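/* Usage sketch (editor's illustration): this helper is typically used
 * to test IDE decode-enable bits before probing a channel.  The fields
 * are { reg, width in bytes, mask, val }; the values below are
 * hypothetical, in the style of ata_piix:
 *
 *	static const struct pci_bits foo_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;
 */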
6016 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6018 pci_save_state(pdev);
6020 if (mesg.event == PM_EVENT_SUSPEND) {
6021 pci_disable_device(pdev);
6022 pci_set_power_state(pdev, PCI_D3hot);
6026 int ata_pci_device_do_resume(struct pci_dev *pdev)
6030 pci_set_power_state(pdev, PCI_D0);
6031 pci_restore_state(pdev);
6033 rc = pcim_enable_device(pdev);
6035 dev_printk(KERN_ERR, &pdev->dev,
6036 "failed to enable device after resume (%d)\n", rc);
6040 pci_set_master(pdev);
6044 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6046 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6049 rc = ata_host_suspend(host, mesg);
6053 ata_pci_device_do_suspend(pdev, mesg);
6058 int ata_pci_device_resume(struct pci_dev *pdev)
6060 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6063 rc = ata_pci_device_do_resume(pdev);
6065 ata_host_resume(host);
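/* Usage sketch (editor's illustration): a PCI LLD with no special PM
 * requirements can point its pci_driver hooks straight at the helpers
 * above ("foo" names are hypothetical):
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */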
6068 #endif /* CONFIG_PCI */
6071 static int __init ata_init(void)
6073 ata_probe_timeout *= HZ;
6074 ata_wq = create_workqueue("ata");
6078 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6080 destroy_workqueue(ata_wq);
6084 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6088 static void __exit ata_exit(void)
6090 destroy_workqueue(ata_wq);
6091 destroy_workqueue(ata_aux_wq);
6094 subsys_initcall(ata_init);
6095 module_exit(ata_exit);
6097 static unsigned long ratelimit_time;
6098 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6100 int ata_ratelimit(void)
6103 unsigned long flags;
6105 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6107 if (time_after(jiffies, ratelimit_time)) {
6109 ratelimit_time = jiffies + (HZ/5);
6113 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
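/* Usage sketch (editor's illustration): gate diagnostics that may fire
 * on every interrupt behind ata_ratelimit(), which allows roughly five
 * messages per second (HZ/5 between messages):
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"spurious interrupt\n");
 */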
6119 * ata_wait_register - wait until register value changes
6120 * @reg: IO-mapped register
6121 * @mask: Mask to apply to read register value
6122 * @val: Wait condition
6123 * @interval_msec: polling interval in milliseconds
6124 * @timeout_msec: timeout in milliseconds
6126 * Waiting for some bits of register to change is a common
6127 * operation for ATA controllers. This function reads the 32-bit LE
6128 * IO-mapped register @reg and tests for the following condition:
6130 * (*@reg & mask) != val
6132 * If the condition is met, it returns; otherwise, the process is
6133 * repeated after @interval_msec until timeout.
6136 * Kernel thread context (may sleep)
6139 * The final register value.
6141 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6142 unsigned long interval_msec,
6143 unsigned long timeout_msec)
6145 unsigned long timeout;
6148 tmp = ioread32(reg);
6150 /* Calculate timeout _after_ the first read to make sure
6151 * preceding writes reach the controller before starting to
6152 * eat away the timeout.
6154 timeout = jiffies + (timeout_msec * HZ) / 1000;
6156 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6157 msleep(interval_msec);
6158 tmp = ioread32(reg);
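/* Usage sketch (editor's illustration): wait up to one second for a
 * hypothetical BUSY bit to clear, polling every 10 ms.  The call
 * returns as soon as (*reg & FOO_BUSY) != FOO_BUSY:
 *
 *	u32 stat = ata_wait_register(mmio + FOO_STAT,
 *				     FOO_BUSY, FOO_BUSY, 10, 1000);
 *	if (stat & FOO_BUSY)
 *		return -EBUSY;
 */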
6167 static void ata_dummy_noret(struct ata_port *ap) { }
6168 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6169 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6171 static u8 ata_dummy_check_status(struct ata_port *ap)
6176 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6178 return AC_ERR_SYSTEM;
6181 const struct ata_port_operations ata_dummy_port_ops = {
6182 .port_disable = ata_port_disable,
6183 .check_status = ata_dummy_check_status,
6184 .check_altstatus = ata_dummy_check_status,
6185 .dev_select = ata_noop_dev_select,
6186 .qc_prep = ata_noop_qc_prep,
6187 .qc_issue = ata_dummy_qc_issue,
6188 .freeze = ata_dummy_noret,
6189 .thaw = ata_dummy_noret,
6190 .error_handler = ata_dummy_noret,
6191 .post_internal_cmd = ata_dummy_qc_noret,
6192 .irq_clear = ata_dummy_noret,
6193 .port_start = ata_dummy_ret0,
6194 .port_stop = ata_dummy_noret,
6198 * libata is essentially a library of internal helper functions for
6199 * low-level ATA host controller drivers. As such, the API/ABI is
6200 * likely to change as new drivers are added and updated.
6201 * Do not depend on ABI/API stability.
6204 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6205 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6206 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6207 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6208 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6209 EXPORT_SYMBOL_GPL(ata_std_ports);
6210 EXPORT_SYMBOL_GPL(ata_host_init);
6211 EXPORT_SYMBOL_GPL(ata_device_add);
6212 EXPORT_SYMBOL_GPL(ata_host_detach);
6213 EXPORT_SYMBOL_GPL(ata_sg_init);
6214 EXPORT_SYMBOL_GPL(ata_sg_init_one);
6215 EXPORT_SYMBOL_GPL(ata_hsm_move);
6216 EXPORT_SYMBOL_GPL(ata_qc_complete);
6217 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6218 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6219 EXPORT_SYMBOL_GPL(ata_tf_load);
6220 EXPORT_SYMBOL_GPL(ata_tf_read);
6221 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6222 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6223 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6224 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6225 EXPORT_SYMBOL_GPL(ata_check_status);
6226 EXPORT_SYMBOL_GPL(ata_altstatus);
6227 EXPORT_SYMBOL_GPL(ata_exec_command);
6228 EXPORT_SYMBOL_GPL(ata_port_start);
6229 EXPORT_SYMBOL_GPL(ata_interrupt);
6230 EXPORT_SYMBOL_GPL(ata_data_xfer);
6231 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6232 EXPORT_SYMBOL_GPL(ata_qc_prep);
6233 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6234 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6235 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6236 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6237 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6238 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6239 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6240 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6241 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6242 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6243 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6244 EXPORT_SYMBOL_GPL(ata_port_probe);
6245 EXPORT_SYMBOL_GPL(sata_set_spd);
6246 EXPORT_SYMBOL_GPL(sata_phy_debounce);
6247 EXPORT_SYMBOL_GPL(sata_phy_resume);
6248 EXPORT_SYMBOL_GPL(sata_phy_reset);
6249 EXPORT_SYMBOL_GPL(__sata_phy_reset);
6250 EXPORT_SYMBOL_GPL(ata_bus_reset);
6251 EXPORT_SYMBOL_GPL(ata_std_prereset);
6252 EXPORT_SYMBOL_GPL(ata_std_softreset);
6253 EXPORT_SYMBOL_GPL(sata_port_hardreset);
6254 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6255 EXPORT_SYMBOL_GPL(ata_std_postreset);
6256 EXPORT_SYMBOL_GPL(ata_dev_classify);
6257 EXPORT_SYMBOL_GPL(ata_dev_pair);
6258 EXPORT_SYMBOL_GPL(ata_port_disable);
6259 EXPORT_SYMBOL_GPL(ata_ratelimit);
6260 EXPORT_SYMBOL_GPL(ata_wait_register);
6261 EXPORT_SYMBOL_GPL(ata_busy_sleep);
6262 EXPORT_SYMBOL_GPL(ata_port_queue_task);
6263 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6264 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6265 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6266 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6267 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6268 EXPORT_SYMBOL_GPL(ata_host_intr);
6269 EXPORT_SYMBOL_GPL(sata_scr_valid);
6270 EXPORT_SYMBOL_GPL(sata_scr_read);
6271 EXPORT_SYMBOL_GPL(sata_scr_write);
6272 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6273 EXPORT_SYMBOL_GPL(ata_port_online);
6274 EXPORT_SYMBOL_GPL(ata_port_offline);
6275 EXPORT_SYMBOL_GPL(ata_host_suspend);
6276 EXPORT_SYMBOL_GPL(ata_host_resume);
6277 EXPORT_SYMBOL_GPL(ata_id_string);
6278 EXPORT_SYMBOL_GPL(ata_id_c_string);
6279 EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6280 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6282 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6283 EXPORT_SYMBOL_GPL(ata_timing_compute);
6284 EXPORT_SYMBOL_GPL(ata_timing_merge);
6287 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6288 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6289 EXPORT_SYMBOL_GPL(ata_pci_init_one);
6290 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6291 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6292 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6293 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6294 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6295 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6296 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6297 #endif /* CONFIG_PCI */
6299 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6300 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6302 EXPORT_SYMBOL_GPL(ata_eng_timeout);
6303 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6304 EXPORT_SYMBOL_GPL(ata_port_abort);
6305 EXPORT_SYMBOL_GPL(ata_port_freeze);
6306 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6307 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6308 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6309 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6310 EXPORT_SYMBOL_GPL(ata_do_eh);
6311 EXPORT_SYMBOL_GPL(ata_irq_on);
6312 EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6313 EXPORT_SYMBOL_GPL(ata_irq_ack);
6314 EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);