4 * Basic PIO and command management functionality.
6 * This code was split off from ide.c. See ide.c for history and original copyrights.
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * For the avoidance of doubt the "preferred form" of this code is one which
20 * is in an open non patent encumbered format. Where cryptographic key signing
21 * forms part of the process of creating an executable the information
22 * including keys needed to generate an equivalently functional executable
23 * are deemed to be part of the source code.
27 #include <linux/module.h>
28 #include <linux/types.h>
29 #include <linux/string.h>
30 #include <linux/kernel.h>
31 #include <linux/timer.h>
33 #include <linux/interrupt.h>
34 #include <linux/major.h>
35 #include <linux/errno.h>
36 #include <linux/genhd.h>
37 #include <linux/blkpg.h>
38 #include <linux/slab.h>
39 #include <linux/init.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/ide.h>
43 #include <linux/hdreg.h>
44 #include <linux/completion.h>
45 #include <linux/reboot.h>
46 #include <linux/cdrom.h>
47 #include <linux/seq_file.h>
48 #include <linux/device.h>
49 #include <linux/kmod.h>
50 #include <linux/scatterlist.h>
51 #include <linux/bitops.h>
53 #include <asm/byteorder.h>
55 #include <asm/uaccess.h>
58 static int __ide_end_request(ide_drive_t *drive, struct request *rq,
59 int uptodate, unsigned int nr_bytes, int dequeue)
66 error = uptodate ? uptodate : -EIO;
69 * if failfast is set on a request, override number of sectors and
70 * complete the whole request right now
72 if (blk_noretry_request(rq) && error)
73 nr_bytes = rq->hard_nr_sectors << 9;
75 if (!blk_fs_request(rq) && error && !rq->errors)
79 * decide whether to reenable DMA -- 3 is an arbitrary magic number for now;
80 * if we time out on DMA more than 3 times, just stay in PIO
82 if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
83 drive->retry_pio <= 3) {
84 drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
88 spin_lock_irqsave(&ide_lock, flags);
89 if (!__blk_end_request(rq, error, nr_bytes))
91 spin_unlock_irqrestore(&ide_lock, flags);
93 if (ret == 0 && dequeue)
94 drive->hwif->hwgroup->rq = NULL;
100 * ide_end_request - complete an IDE I/O
101 * @drive: IDE device for the I/O
103 * @nr_sectors: number of sectors completed
105 * This is our end_request wrapper function. We complete the I/O,
106 * update the random number input, and dequeue the request, which,
107 * if it was tagged, may be out of order.
110 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
112 unsigned int nr_bytes = nr_sectors << 9;
113 struct request *rq = drive->hwif->hwgroup->rq;
116 if (blk_pc_request(rq))
117 nr_bytes = rq->data_len;
119 nr_bytes = rq->hard_cur_sectors << 9;
122 return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
124 EXPORT_SYMBOL(ide_end_request);
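/*
 * Illustrative usage (a sketch, not code from this file): a media driver's
 * completion path typically reports per-chunk progress through
 * ide_end_request().  The names below mirror this file; the surrounding
 * logic is assumed:
 *
 *	if (OK_STAT(stat, ATA_DRDY, BAD_STAT))
 *		ide_end_request(drive, 1, rq->current_nr_sectors);
 *	else
 *		ide_end_request(drive, 0, 0);
 *
 * Passing uptodate <= 0 makes __ide_end_request() complete the chunk with
 * an error (-EIO by default, see above).
 */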
126 static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
128 struct request_pm_state *pm = rq->data;
131 printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
132 drive->name, pm->pm_step);
134 if (drive->media != ide_disk)
137 switch (pm->pm_step) {
138 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
139 if (pm->pm_state == PM_EVENT_FREEZE)
140 pm->pm_step = IDE_PM_COMPLETED;
142 pm->pm_step = IDE_PM_STANDBY;
144 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
145 pm->pm_step = IDE_PM_COMPLETED;
147 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
148 pm->pm_step = IDE_PM_IDLE;
150 case IDE_PM_IDLE: /* Resume step 2 (idle)*/
151 pm->pm_step = IDE_PM_RESTORE_DMA;
156 static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
158 struct request_pm_state *pm = rq->data;
159 ide_task_t *args = rq->special;
161 memset(args, 0, sizeof(*args));
163 switch (pm->pm_step) {
164 case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
165 if (drive->media != ide_disk)
167 /* Not supported? Switch to next step now. */
168 if (ata_id_flush_enabled(drive->id) == 0 ||
169 (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
170 ide_complete_power_step(drive, rq);
173 if (ata_id_flush_ext_enabled(drive->id))
174 args->tf.command = ATA_CMD_FLUSH_EXT;
176 args->tf.command = ATA_CMD_FLUSH;
178 case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
179 args->tf.command = ATA_CMD_STANDBYNOW1;
181 case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
182 ide_set_max_pio(drive);
184 * skip IDE_PM_IDLE for ATAPI devices
186 if (drive->media != ide_disk)
187 pm->pm_step = IDE_PM_RESTORE_DMA;
189 ide_complete_power_step(drive, rq);
191 case IDE_PM_IDLE: /* Resume step 2 (idle) */
192 args->tf.command = ATA_CMD_IDLEIMMEDIATE;
194 case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
196 * Right now, all we do is call ide_set_dma(drive);
197 * we could be smarter and check the current xfer_speed
198 * in the drive struct, etc...
200 if (drive->hwif->dma_ops == NULL)
203 * TODO: respect IDE_DFLAG_USING_DMA
209 pm->pm_step = IDE_PM_COMPLETED;
213 args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
214 args->data_phase = TASKFILE_NO_DATA;
215 return do_rw_taskfile(drive, args);
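/*
 * Summary of the PM state machine above (illustrative, derived from the two
 * helpers): a suspend request walks
 *
 *	IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED
 *
 * (a PM_EVENT_FREEZE skips the standby step), while a resume request walks
 *
 *	IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA -> IDE_PM_COMPLETED
 *
 * ide_start_power_step() builds and issues the taskfile for the current step
 * and ide_complete_power_step() advances pm->pm_step once it finishes.
 */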
219 * ide_end_dequeued_request - complete an IDE I/O
220 * @drive: IDE device for the I/O
222 * @nr_sectors: number of sectors completed
224 * Complete an I/O that is no longer on the request queue. This
225 * typically occurs when we pull the request and issue a REQUEST_SENSE.
226 * We must still finish the old request but we must not tamper with the
227 * queue in the meantime.
229 * NOTE: This path does not handle barrier, but barrier is not supported on this path anyway.
233 int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
234 int uptodate, int nr_sectors)
236 BUG_ON(!blk_rq_started(rq));
238 return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
240 EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
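/*
 * Illustrative usage (hypothetical names, modelled on the ATAPI drivers):
 * after pulling a failed request off the queue in order to issue a
 * REQUEST_SENSE first, the old request is finished later with:
 *
 *	ide_end_dequeued_request(drive, failed_rq, 0, 0);
 *
 * i.e. exactly like ide_end_request(), but without touching hwgroup->rq
 * or the request queue.
 */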
244 * ide_complete_pm_request - end the current Power Management request
245 * @drive: target drive
248 * This function cleans up the current PM request and stops the queue if necessary.
251 static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
256 printk("%s: completing PM request, %s\n", drive->name,
257 blk_pm_suspend_request(rq) ? "suspend" : "resume");
259 spin_lock_irqsave(&ide_lock, flags);
260 if (blk_pm_suspend_request(rq)) {
261 blk_stop_queue(drive->queue);
263 drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
264 blk_start_queue(drive->queue);
266 spin_unlock_irqrestore(&ide_lock, flags);
268 drive->hwif->hwgroup->rq = NULL;
270 spin_lock_irqsave(&ide_lock, flags);
271 if (__blk_end_request(rq, 0, 0))
273 spin_unlock_irqrestore(&ide_lock, flags);
277 * ide_end_drive_cmd - end an explicit drive command
282 * Clean up after success/failure of an explicit drive command.
283 * These get thrown onto the queue so they are synchronized with
284 * real I/O operations on the drive.
286 * In LBA48 mode we have to read the register set twice to get
287 * all the extra information out.
290 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
292 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
293 struct request *rq = hwgroup->rq;
296 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
297 ide_task_t *task = (ide_task_t *)rq->special;
300 rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);
303 struct ide_taskfile *tf = &task->tf;
308 drive->hwif->tp_ops->tf_read(drive, task);
310 if (task->tf_flags & IDE_TFLAG_DYN)
313 } else if (blk_pm_request(rq)) {
314 struct request_pm_state *pm = rq->data;
316 ide_complete_power_step(drive, rq);
317 if (pm->pm_step == IDE_PM_COMPLETED)
318 ide_complete_pm_request(drive, rq);
326 spin_lock_irqsave(&ide_lock, flags);
327 if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
330 spin_unlock_irqrestore(&ide_lock, flags);
332 EXPORT_SYMBOL(ide_end_drive_cmd);
334 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
339 drv = *(ide_driver_t **)rq->rq_disk->private_data;
340 drv->end_request(drive, 0, 0);
342 ide_end_request(drive, 0, 0);
345 static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
347 ide_hwif_t *hwif = drive->hwif;
349 if ((stat & ATA_BUSY) ||
350 ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
351 /* other bits are useless when BUSY */
352 rq->errors |= ERROR_RESET;
353 } else if (stat & ATA_ERR) {
354 /* err has different meaning on cdrom and tape */
355 if (err == ATA_ABORTED) {
356 if ((drive->dev_flags & IDE_DFLAG_LBA) &&
357 /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
358 hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
360 } else if ((err & BAD_CRC) == BAD_CRC) {
361 /* UDMA crc error, just retry the operation */
363 } else if (err & (ATA_BBK | ATA_UNC)) {
364 /* retries won't help these */
365 rq->errors = ERROR_MAX;
366 } else if (err & ATA_TRK0NF) {
367 /* help it find track zero */
368 rq->errors |= ERROR_RECAL;
372 if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
373 (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
374 int nsect = drive->mult_count ? drive->mult_count : 1;
376 ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
379 if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
380 ide_kill_rq(drive, rq);
384 if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
385 rq->errors |= ERROR_RESET;
387 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
389 return ide_do_reset(drive);
392 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
393 drive->special.b.recalibrate = 1;
400 static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
402 ide_hwif_t *hwif = drive->hwif;
404 if ((stat & ATA_BUSY) ||
405 ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
406 /* other bits are useless when BUSY */
407 rq->errors |= ERROR_RESET;
409 /* add decoding error stuff */
412 if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
414 hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
416 if (rq->errors >= ERROR_MAX) {
417 ide_kill_rq(drive, rq);
419 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
421 return ide_do_reset(drive);
430 __ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
432 if (drive->media == ide_disk)
433 return ide_ata_error(drive, rq, stat, err);
434 return ide_atapi_error(drive, rq, stat, err);
437 EXPORT_SYMBOL_GPL(__ide_error);
440 * ide_error - handle an error on the IDE
441 * @drive: drive the error occurred on
442 * @msg: message to report
445 * ide_error() takes action based on the error returned by the drive.
446 * For normal I/O that may well include retries. We deal with
447 * both new-style (taskfile) and old-style command handling here.
448 * In the case of taskfile command handling there is work left to do.
452 ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
457 err = ide_dump_status(drive, msg, stat);
459 if ((rq = HWGROUP(drive)->rq) == NULL)
462 /* retry only "normal" I/O: */
463 if (!blk_fs_request(rq)) {
465 ide_end_drive_cmd(drive, stat, err);
472 drv = *(ide_driver_t **)rq->rq_disk->private_data;
473 return drv->error(drive, rq, stat, err);
475 return __ide_error(drive, rq, stat, err);
478 EXPORT_SYMBOL_GPL(ide_error);
480 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
482 tf->nsect = drive->sect;
483 tf->lbal = drive->sect;
484 tf->lbam = drive->cyl;
485 tf->lbah = drive->cyl >> 8;
486 tf->device = (drive->head - 1) | drive->select;
487 tf->command = ATA_CMD_INIT_DEV_PARAMS;
490 static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
492 tf->nsect = drive->sect;
493 tf->command = ATA_CMD_RESTORE;
496 static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
498 tf->nsect = drive->mult_req;
499 tf->command = ATA_CMD_SET_MULTI;
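/*
 * Worked example (illustrative numbers): for an old CHS drive with 16 heads
 * and 63 sectors per track, ide_tf_set_specify_cmd() above ends up
 * programming
 *
 *	tf->nsect  = 63;                        (sectors per track)
 *	tf->device = (16 - 1) | drive->select;  (maximum head number)
 *
 * before sending ATA_CMD_INIT_DEV_PARAMS, which is how the legacy geometry
 * is restored after a reset.
 */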
502 static ide_startstop_t ide_disk_special(ide_drive_t *drive)
504 special_t *s = &drive->special;
507 memset(&args, 0, sizeof(ide_task_t));
508 args.data_phase = TASKFILE_NO_DATA;
510 if (s->b.set_geometry) {
511 s->b.set_geometry = 0;
512 ide_tf_set_specify_cmd(drive, &args.tf);
513 } else if (s->b.recalibrate) {
514 s->b.recalibrate = 0;
515 ide_tf_set_restore_cmd(drive, &args.tf);
516 } else if (s->b.set_multmode) {
517 s->b.set_multmode = 0;
518 ide_tf_set_setmult_cmd(drive, &args.tf);
520 int special = s->all;
522 printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
526 args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
527 IDE_TFLAG_CUSTOM_HANDLER;
529 do_rw_taskfile(drive, &args);
535 * do_special - issue some special commands
536 * @drive: drive the command is for
538 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
539 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
541 * It used to do much more, but has been scaled back.
544 static ide_startstop_t do_special (ide_drive_t *drive)
546 special_t *s = &drive->special;
549 printk("%s: do_special: 0x%02x\n", drive->name, s->all);
551 if (drive->media == ide_disk)
552 return ide_disk_special(drive);
559 void ide_map_sg(ide_drive_t *drive, struct request *rq)
561 ide_hwif_t *hwif = drive->hwif;
562 struct scatterlist *sg = hwif->sg_table;
564 if (hwif->sg_mapped) /* needed by ide-scsi */
567 if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
568 hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
570 sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
575 EXPORT_SYMBOL_GPL(ide_map_sg);
577 void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
579 ide_hwif_t *hwif = drive->hwif;
581 hwif->nsect = hwif->nleft = rq->nr_sectors;
586 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
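/*
 * Typical call order (as used by execute_drive_cmd() below): set up the
 * per-command sector bookkeeping, then build the scatterlist before the
 * data phase starts:
 *
 *	ide_init_sg_cmd(drive, rq);
 *	ide_map_sg(drive, rq);
 *
 * For REQ_TYPE_ATA_TASKFILE requests ide_map_sg() maps rq->buffer as a
 * single scatterlist entry instead of walking the request's bios.
 */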
589 * execute_drive_cmd - issue special drive command
590 * @drive: the drive to issue the command on
591 * @rq: the request structure holding the command
593 * execute_drive_cmd() issues a special drive command, usually
594 * initiated by ioctl() from the external hdparm program. The
595 * command can be a drive command, drive task or taskfile
596 * operation. Weirdly you can call it with NULL to wait for
597 * all commands to finish. Don't do this, as that is due to change.
600 static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
603 ide_hwif_t *hwif = HWIF(drive);
604 ide_task_t *task = rq->special;
607 hwif->data_phase = task->data_phase;
609 switch (hwif->data_phase) {
610 case TASKFILE_MULTI_OUT:
612 case TASKFILE_MULTI_IN:
614 ide_init_sg_cmd(drive, rq);
615 ide_map_sg(drive, rq);
620 return do_rw_taskfile(drive, task);
624 * NULL is actually a valid way of waiting for
625 * all current requests to be flushed from the queue.
628 printk("%s: DRIVE_CMD (null)\n", drive->name);
630 ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
631 ide_read_error(drive));
636 int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
639 struct request_queue *q = drive->queue;
643 if (!(setting->flags & DS_SYNC))
644 return setting->set(drive, arg);
646 rq = blk_get_request(q, READ, __GFP_WAIT);
647 rq->cmd_type = REQ_TYPE_SPECIAL;
649 rq->cmd[0] = REQ_DEVSET_EXEC;
650 *(int *)&rq->cmd[1] = arg;
651 rq->special = setting->set;
653 if (blk_execute_rq(q, NULL, rq, 0))
659 EXPORT_SYMBOL_GPL(ide_devset_execute);
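/*
 * How a DS_SYNC setting gets applied (summary of the code above): instead of
 * calling the setter directly, ide_devset_execute() packs it into a
 * REQ_TYPE_SPECIAL request -- cmd[0] = REQ_DEVSET_EXEC, the argument in
 * cmd[1..4], the setter in rq->special -- and pushes it through the queue
 * with blk_execute_rq(), so the change is serialized against normal I/O.
 * ide_special_rq() below unpacks the request and calls the setter.
 */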
661 static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
665 if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
667 struct ide_taskfile *tf = &task.tf;
669 memset(&task, 0, sizeof(task));
670 if (cmd == REQ_PARK_HEADS) {
671 drive->sleep = *(unsigned long *)rq->special;
672 drive->dev_flags |= IDE_DFLAG_SLEEPING;
673 tf->command = ATA_CMD_IDLEIMMEDIATE;
678 task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
679 } else /* cmd == REQ_UNPARK_HEADS */
680 tf->command = ATA_CMD_CHK_POWER;
682 task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
684 drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
685 return do_rw_taskfile(drive, &task);
689 case REQ_DEVSET_EXEC:
691 int err, (*setfunc)(ide_drive_t *, int) = rq->special;
693 err = setfunc(drive, *(int *)&rq->cmd[1]);
698 ide_end_request(drive, err, 0);
701 case REQ_DRIVE_RESET:
702 return ide_do_reset(drive);
704 blk_dump_rq_flags(rq, "ide_special_rq - bad request");
705 ide_end_request(drive, 0, 0);
710 static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
712 struct request_pm_state *pm = rq->data;
714 if (blk_pm_suspend_request(rq) &&
715 pm->pm_step == IDE_PM_START_SUSPEND)
716 /* Mark drive blocked when starting the suspend sequence. */
717 drive->dev_flags |= IDE_DFLAG_BLOCKED;
718 else if (blk_pm_resume_request(rq) &&
719 pm->pm_step == IDE_PM_START_RESUME) {
721 * The first thing we do on wakeup is to wait for BSY bit to
722 * go away (with a looong timeout) as a drive on this hwif may
723 * just be POSTing itself.
724 * We do that before even selecting as the "other" device on
725 * the bus may be broken enough to walk on our toes at this point.
728 ide_hwif_t *hwif = drive->hwif;
731 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
733 rc = ide_wait_not_busy(hwif, 35000);
735 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
737 hwif->tp_ops->set_irq(hwif, 1);
738 rc = ide_wait_not_busy(hwif, 100000);
740 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
745 * start_request - start of I/O and command issuing for IDE
747 * start_request() initiates handling of a new I/O request. It
748 * accepts commands and I/O (read/write) requests.
750 * FIXME: this function needs a rename
753 static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
755 ide_startstop_t startstop;
757 BUG_ON(!blk_rq_started(rq));
760 printk("%s: start_request: current=0x%08lx\n",
761 HWIF(drive)->name, (unsigned long) rq);
764 /* bail early if we've exceeded max_failures */
765 if (drive->max_failures && (drive->failures > drive->max_failures)) {
766 rq->cmd_flags |= REQ_FAILED;
770 if (blk_pm_request(rq))
771 ide_check_pm_state(drive, rq);
774 if (ide_wait_stat(&startstop, drive, drive->ready_stat,
775 ATA_BUSY | ATA_DRQ, WAIT_READY)) {
776 printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
779 if (!drive->special.all) {
783 * We reset the drive so we need to issue a SETFEATURES.
784 * Do it _after_ do_special() restored device parameters.
786 if (drive->current_speed == 0xff)
787 ide_config_drive_speed(drive, drive->desired_speed);
789 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
790 return execute_drive_cmd(drive, rq);
791 else if (blk_pm_request(rq)) {
792 struct request_pm_state *pm = rq->data;
794 printk("%s: start_power_step(step: %d)\n",
795 drive->name, pm->pm_step);
797 startstop = ide_start_power_step(drive, rq);
798 if (startstop == ide_stopped &&
799 pm->pm_step == IDE_PM_COMPLETED)
800 ide_complete_pm_request(drive, rq);
802 } else if (!rq->rq_disk && blk_special_request(rq))
804 * TODO: Once all ULDs have been modified to
805 * check for specific op codes rather than
806 * blindly accepting any special request, the
807 * check for ->rq_disk above may be replaced
808 * by a more suitable mechanism or even
811 return ide_special_rq(drive, rq);
813 drv = *(ide_driver_t **)rq->rq_disk->private_data;
815 return drv->do_request(drive, rq, rq->sector);
817 return do_special(drive);
819 ide_kill_rq(drive, rq);
824 * ide_stall_queue - pause an IDE device
825 * @drive: drive to stall
826 * @timeout: time to stall for (jiffies)
828 * ide_stall_queue() can be used by a drive to give excess bandwidth back
829 * to the hwgroup by sleeping for timeout jiffies.
832 void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
834 if (timeout > WAIT_WORSTCASE)
835 timeout = WAIT_WORSTCASE;
836 drive->sleep = timeout + jiffies;
837 drive->dev_flags |= IDE_DFLAG_SLEEPING;
840 EXPORT_SYMBOL(ide_stall_queue);
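/*
 * Example from this file: choose_drive() below uses ide_stall_queue() to
 * park a "nice" drive that does not need service yet, so its mate on the
 * cable can run first:
 *
 *	ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
 *
 * A subdriver may make the same call voluntarily to give bandwidth back.
 */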
842 #define WAKEUP(drive) ((drive)->service_start + 2 * (drive)->service_time)
845 * choose_drive - select a drive to service
846 * @hwgroup: hardware group to select on
848 * choose_drive() selects the next drive which will be serviced.
849 * This is necessary because the IDE layer can't issue commands
850 * to both drives on the same cable, unlike SCSI.
853 static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
855 ide_drive_t *drive, *best;
859 drive = hwgroup->drive;
862 * drive is doing pre-flush, ordered write, post-flush sequence. even
863 * though that is 3 requests, it must be seen as a single transaction.
864 * we must not preempt this drive until that is complete
866 if (blk_queue_flushing(drive->queue)) {
868 * small race where queue could get replugged during
869 * the 3-request flush cycle, just yank the plug since
870 * we want it to finish asap
872 blk_remove_plug(drive->queue);
877 u8 dev_s = !!(drive->dev_flags & IDE_DFLAG_SLEEPING);
878 u8 best_s = (best && !!(best->dev_flags & IDE_DFLAG_SLEEPING));
880 if ((dev_s == 0 || time_after_eq(jiffies, drive->sleep)) &&
881 !elv_queue_empty(drive->queue)) {
883 (dev_s && (best_s == 0 || time_before(drive->sleep, best->sleep))) ||
884 (best_s == 0 && time_before(WAKEUP(drive), WAKEUP(best)))) {
885 if (!blk_queue_plugged(drive->queue))
889 } while ((drive = drive->next) != hwgroup->drive);
891 if (best && (best->dev_flags & IDE_DFLAG_NICE1) &&
892 (best->dev_flags & IDE_DFLAG_SLEEPING) == 0 &&
893 best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
894 long t = (signed long)(WAKEUP(best) - jiffies);
895 if (t >= WAIT_MIN_SLEEP) {
897 * We *may* have some time to spare, but first let's see if
898 * someone can potentially benefit from our nice mood today..
902 if ((drive->dev_flags & IDE_DFLAG_SLEEPING) == 0
903 && time_before(jiffies - best->service_time, WAKEUP(drive))
904 && time_before(WAKEUP(drive), jiffies + t))
906 ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
909 } while ((drive = drive->next) != best);
916 * Issue a new request to a drive from hwgroup
917 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
919 * A hwgroup is a serialized group of IDE interfaces. Usually there is
920 * exactly one hwif (interface) per hwgroup, but buggy controllers (e.g. CMD640)
921 * may have both interfaces in a single hwgroup to "serialize" access.
922 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
923 * together into one hwgroup for serialized access.
925 * Note also that several hwgroups can end up sharing a single IRQ,
926 * possibly along with many other devices. This is especially common in
927 * PCI-based systems with off-board IDE controller cards.
929 * The IDE driver uses the single global ide_lock spinlock to protect
930 * access to the request queues, and to protect the hwgroup->busy flag.
932 * The first thread into the driver for a particular hwgroup sets the
933 * hwgroup->busy flag to indicate that this hwgroup is now active,
934 * and then initiates processing of the top request from the request queue.
936 * Other threads attempting entry notice the busy setting, and will simply
937 * queue their new requests and exit immediately. Note that hwgroup->busy
938 * remains set even when the driver is merely awaiting the next interrupt.
939 * Thus, the meaning is "this hwgroup is busy processing a request".
941 * When processing of a request completes, the completing thread or IRQ-handler
942 * will start the next request from the queue. If no more work remains,
943 * the driver will clear the hwgroup->busy flag and exit.
945 * The ide_lock (spinlock) is used to protect all access to the
946 * hwgroup->busy flag, but is otherwise not needed for most processing in
947 * the driver. This makes the driver much friendlier to shared IRQs
948 * than previous designs, while remaining 100% (?) SMP safe and capable.
950 static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
955 ide_startstop_t startstop;
958 /* caller must own ide_lock */
959 BUG_ON(!irqs_disabled());
961 while (!hwgroup->busy) {
964 ide_get_lock(ide_intr, hwgroup);
965 drive = choose_drive(hwgroup);
968 unsigned long sleep = 0; /* shut up, gcc */
970 drive = hwgroup->drive;
972 if ((drive->dev_flags & IDE_DFLAG_SLEEPING) &&
974 time_before(drive->sleep, sleep))) {
976 sleep = drive->sleep;
978 } while ((drive = drive->next) != hwgroup->drive);
981 * Take a short snooze, and then wake up this hwgroup again.
982 * This gives other hwgroups on the same IRQ a chance to
983 * play fairly with us, just in case there are big differences
984 * in relative throughputs -- we don't want to hog the CPU too much.
986 if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
987 sleep = jiffies + WAIT_MIN_SLEEP;
989 if (timer_pending(&hwgroup->timer))
990 printk(KERN_CRIT "ide_set_handler: timer already active\n");
992 /* so that ide_timer_expiry knows what to do */
993 hwgroup->sleeping = 1;
994 hwgroup->req_gen_timer = hwgroup->req_gen;
995 mod_timer(&hwgroup->timer, sleep);
996 /* we purposely leave hwgroup->busy==1 while sleeping */
999 /* Ugly, but how can we sleep for the lock
1000 * otherwise? perhaps from tq_disk?
1003 /* for atari only */
1008 /* no more work for this hwgroup (for now) */
1013 if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
1015 * set nIEN for previous hwif, drives in the
1016 * quirk_list may not like intr setups/cleanups
1018 if (drive->quirk_list != 1)
1019 hwif->tp_ops->set_irq(hwif, 0);
1021 hwgroup->hwif = hwif;
1022 hwgroup->drive = drive;
1023 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
1024 drive->service_start = jiffies;
1026 if (blk_queue_plugged(drive->queue)) {
1027 printk(KERN_ERR "ide: huh? queue was plugged!\n");
1032 * we know that the queue isn't empty, but this can happen
1033 * if the q->prep_rq_fn() decides to kill a request
1035 rq = elv_next_request(drive->queue);
1042 * Sanity: don't accept a request that isn't a PM request
1043 * if we are currently power managed. This is very important as
1044 * blk_stop_queue() doesn't prevent the elv_next_request()
1045 * above from returning whatever is in the queue. Since we call
1046 * ide_do_request() ourselves, we end up taking requests while
1047 * the queue is blocked...
1049 * We do let requests forced to the head of the queue with ide-preempt
1050 * through, though. I hope that doesn't happen too much, hopefully not
1051 * unless the subdriver triggers such a thing in its own PM state machine.
1054 * We count how many times we loop here to make sure we service
1055 * all drives in the hwgroup without looping forever.
1057 if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
1058 blk_pm_request(rq) == 0 &&
1059 (rq->cmd_flags & REQ_PREEMPT) == 0) {
1060 drive = drive->next ? drive->next : hwgroup->drive;
1061 if (loops++ < 4 && !blk_queue_plugged(drive->queue))
1063 /* We clear busy, there should be no pending ATA command at this point. */
1071 * Some systems have trouble with IDE IRQs arriving while
1072 * the driver is still setting things up. So, here we disable
1073 * the IRQ used by this interface while the request is being started.
1074 * This may look bad at first, but pretty much the same thing
1075 * happens anyway when any interrupt comes in, IDE or otherwise
1076 * -- the kernel masks the IRQ while it is being handled.
1078 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
1079 disable_irq_nosync(hwif->irq);
1080 spin_unlock(&ide_lock);
1081 local_irq_enable_in_hardirq();
1082 /* allow other IRQs while we start this request */
1083 startstop = start_request(drive, rq);
1084 spin_lock_irq(&ide_lock);
1085 if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
1086 enable_irq(hwif->irq);
1087 if (startstop == ide_stopped)
1093 * Block layer entry point (request_fn) -- passes the work on to ide_do_request()
1095 void do_ide_request(struct request_queue *q)
1097 ide_drive_t *drive = q->queuedata;
1099 ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
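/*
 * do_ide_request() is installed as the queue's request_fn at probe time
 * (roughly: blk_init_queue(do_ide_request, &ide_lock) in ide-probe.c -- the
 * exact call site is outside this file), so the block layer invokes it with
 * ide_lock held and interrupts disabled whenever the queue is run.
 */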
1103 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
1104 * retry the current request in pio mode instead of risking tossing it all away
1107 static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
1109 ide_hwif_t *hwif = HWIF(drive);
1111 ide_startstop_t ret = ide_stopped;
1114 * end current dma transaction
1118 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
1119 (void)hwif->dma_ops->dma_end(drive);
1120 ret = ide_error(drive, "dma timeout error",
1121 hwif->tp_ops->read_status(hwif));
1123 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
1124 hwif->dma_ops->dma_timeout(drive);
1128 * disable dma for now, but remember that we did so because of
1129 * a timeout -- we'll reenable after we finish this next request
1130 * (or rather the first chunk of it) in pio.
1132 drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
1134 ide_dma_off_quietly(drive);
1137 * un-busy drive etc (hwgroup->busy is cleared on return) and
1138 * make sure request is sane
1140 rq = HWGROUP(drive)->rq;
1145 HWGROUP(drive)->rq = NULL;
1152 rq->sector = rq->bio->bi_sector;
1153 rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
1154 rq->hard_cur_sectors = rq->current_nr_sectors;
1155 rq->buffer = bio_data(rq->bio);
1161 * ide_timer_expiry - handle lack of an IDE interrupt
1162 * @data: timer callback magic (hwgroup)
1164 * An IDE command has timed out before the expected drive return
1165 * occurred. At this point we attempt to clean up the current
1166 * mess. If the current handler includes an expiry handler then
1167 * we invoke the expiry handler, and providing it is happy the
1168 * work is done. If that fails we apply generic recovery rules
1169 * invoking the handler and checking the drive DMA status. We
1170 * have an excessively incestuous relationship with the DMA
1171 * logic that wants cleaning up.
1174 void ide_timer_expiry (unsigned long data)
1176 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
1177 ide_handler_t *handler;
1178 ide_expiry_t *expiry;
1179 unsigned long flags;
1180 unsigned long wait = -1;
1182 spin_lock_irqsave(&ide_lock, flags);
1184 if (((handler = hwgroup->handler) == NULL) ||
1185 (hwgroup->req_gen != hwgroup->req_gen_timer)) {
1187 * Either a marginal timeout occurred
1188 * (got the interrupt just as timer expired),
1189 * or we were "sleeping" to give other devices a chance.
1190 * Either way, we don't really want to complain about anything.
1192 if (hwgroup->sleeping) {
1193 hwgroup->sleeping = 0;
1197 ide_drive_t *drive = hwgroup->drive;
1199 printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
1200 hwgroup->handler = NULL;
1203 ide_startstop_t startstop = ide_stopped;
1204 if (!hwgroup->busy) {
1205 hwgroup->busy = 1; /* paranoia */
1206 printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
1208 if ((expiry = hwgroup->expiry) != NULL) {
1210 if ((wait = expiry(drive)) > 0) {
1212 hwgroup->timer.expires = jiffies + wait;
1213 hwgroup->req_gen_timer = hwgroup->req_gen;
1214 add_timer(&hwgroup->timer);
1215 spin_unlock_irqrestore(&ide_lock, flags);
1219 hwgroup->handler = NULL;
1221 * We need to simulate a real interrupt when invoking
1222 * the handler() function, which means we need to
1223 * globally mask the specific IRQ:
1225 spin_unlock(&ide_lock);
1227 /* disable_irq_nosync ?? */
1228 disable_irq(hwif->irq);
1230 /* local CPU only, as if we were handling an interrupt */
1231 local_irq_disable();
1232 if (hwgroup->polling) {
1233 startstop = handler(drive);
1234 } else if (drive_is_ready(drive)) {
1235 if (drive->waiting_for_dma)
1236 hwif->dma_ops->dma_lost_irq(drive);
1237 (void)ide_ack_intr(hwif);
1238 printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
1239 startstop = handler(drive);
1241 if (drive->waiting_for_dma) {
1242 startstop = ide_dma_timeout_retry(drive, wait);
1245 ide_error(drive, "irq timeout",
1246 hwif->tp_ops->read_status(hwif));
1248 drive->service_time = jiffies - drive->service_start;
1249 spin_lock_irq(&ide_lock);
1250 enable_irq(hwif->irq);
1251 if (startstop == ide_stopped)
1255 ide_do_request(hwgroup, IDE_NO_IRQ);
1256 spin_unlock_irqrestore(&ide_lock, flags);
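/*
 * The timeout handled above is armed whenever a handler is installed for the
 * next expected interrupt, e.g. (sketch of a typical ide_set_handler() call
 * made elsewhere in the core):
 *
 *	ide_set_handler(drive, &task_no_data_intr, WAIT_WORSTCASE, NULL);
 *
 * which records hwgroup->handler/expiry and mod_timer()s hwgroup->timer;
 * ide_timer_expiry() only runs if no interrupt beats the timer.
 */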
1260 * unexpected_intr - handle an unexpected IDE interrupt
1261 * @irq: interrupt line
1262 * @hwgroup: hwgroup being processed
1264 * There's nothing really useful we can do with an unexpected interrupt,
1265 * other than reading the status register (to clear it), and logging it.
1266 * There should be no way that an irq can happen before we're ready for it,
1267 * so we needn't worry much about losing an "important" interrupt here.
1269 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
1270 * the drive enters "idle", "standby", or "sleep" mode, so if the status
1271 * looks "good", we just ignore the interrupt completely.
1273 * This routine assumes __cli() is in effect when called.
1275 * If an unexpected interrupt happens on irq15 while we are handling irq14
1276 * and if the two interfaces are "serialized" (CMD640), then it looks like
1277 * we could screw up by interfering with a new request being set up for irq15.
1280 * In reality, this is a non-issue. The new command is not sent unless
1281 * the drive is ready to accept one, in which case we know the drive is
1282 * not trying to interrupt us. And ide_set_handler() is always invoked
1283 * before completing the issuance of any new drive command, so we will not
1284 * be accidentally invoked as a result of any valid command completion interrupt.
1287 * Note that we must walk the entire hwgroup here. We know which hwif
1288 * is doing the current command, but we don't know which hwif burped mysteriously.
1292 static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
1295 ide_hwif_t *hwif = hwgroup->hwif;
1298 * handle the unexpected interrupt
1301 if (hwif->irq == irq) {
1302 stat = hwif->tp_ops->read_status(hwif);
1304 if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
1305 /* Try to not flood the console with msgs */
1306 static unsigned long last_msgtime, count;
1308 if (time_after(jiffies, last_msgtime + HZ)) {
1309 last_msgtime = jiffies;
1310 printk(KERN_ERR "%s%s: unexpected interrupt, "
1311 "status=0x%02x, count=%ld\n",
1313 (hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
1317 } while ((hwif = hwif->next) != hwgroup->hwif);
1321 * ide_intr - default IDE interrupt handler
1322 * @irq: interrupt number
1323 * @dev_id: hwif group
1324 * @regs: unused weirdness from the kernel irq layer
1326 * This is the default IRQ handler for the IDE layer. You should
1327 * not need to override it. If you do, be aware it is subtle in places.
1330 * hwgroup->hwif is the interface in the group currently performing
1331 * a command. hwgroup->drive is the drive and hwgroup->handler is
1332 * the IRQ handler to call. As we issue a command the handlers
1333 * step through multiple states, reassigning the handler to the
1334 * next step in the process. Unlike a smart SCSI controller IDE
1335 * expects the main processor to sequence the various transfer
1336 * stages. We also manage a poll timer to catch up with most
1337 * timeout situations. There are still a few where the handlers
1338 * don't ever decide to give up.
1340 * The handler eventually returns ide_stopped to indicate the
1341 * request completed. At this point we issue the next request
1342 * on the hwgroup and the process begins again.
1345 irqreturn_t ide_intr (int irq, void *dev_id)
1347 unsigned long flags;
1348 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
1351 ide_handler_t *handler;
1352 ide_startstop_t startstop;
1353 irqreturn_t irq_ret = IRQ_NONE;
1355 spin_lock_irqsave(&ide_lock, flags);
1356 hwif = hwgroup->hwif;
1358 if (!ide_ack_intr(hwif))
1361 if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
1363 * Not expecting an interrupt from this drive.
1364 * That means this could be:
1365 * (1) an interrupt from another PCI device
1366 * sharing the same PCI INT# as us.
1367 * or (2) a drive just entered sleep or standby mode,
1368 * and is interrupting to let us know.
1369 * or (3) a spurious interrupt of unknown origin.
1371 * For PCI, we cannot tell the difference,
1372 * so in that case we just ignore it and hope it goes away.
1374 * FIXME: unexpected_intr should be hwif-> then we can
1375 * remove all the ifdef PCI crap
1377 #ifdef CONFIG_BLK_DEV_IDEPCI
1378 if (hwif->chipset != ide_pci)
1379 #endif /* CONFIG_BLK_DEV_IDEPCI */
1382 * Probably not a shared PCI interrupt,
1383 * so we can safely try to do something about it:
1385 unexpected_intr(irq, hwgroup);
1386 #ifdef CONFIG_BLK_DEV_IDEPCI
1389 * Whack the status register, just in case
1390 * we have a leftover pending IRQ.
1392 (void)hwif->tp_ops->read_status(hwif);
1393 #endif /* CONFIG_BLK_DEV_IDEPCI */
1398 drive = hwgroup->drive;
1401 * This should NEVER happen, and there isn't much
1402 * we could do about it here.
1404 * [Note - this can occur if the drive is hot unplugged]
1409 if (!drive_is_ready(drive))
1411 * This happens regularly when we share a PCI IRQ with
1412 * another device. Unfortunately, it can also happen
1413 * with some buggy drives that trigger the IRQ before
1414 * their status register is up to date. Hopefully we have
1415 * enough advance overhead that the latter isn't a problem.
1419 if (!hwgroup->busy) {
1420 hwgroup->busy = 1; /* paranoia */
1421 printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
1423 hwgroup->handler = NULL;
1425 del_timer(&hwgroup->timer);
1426 spin_unlock(&ide_lock);
1428 if (hwif->port_ops && hwif->port_ops->clear_irq)
1429 hwif->port_ops->clear_irq(drive);
1431 if (drive->dev_flags & IDE_DFLAG_UNMASK)
1432 local_irq_enable_in_hardirq();
1434 /* service this interrupt, may set handler for next interrupt */
1435 startstop = handler(drive);
1437 spin_lock_irq(&ide_lock);
1439 * Note that handler() may have set things up for another
1440 * interrupt to occur soon, but it cannot happen until
1441 * we exit from this routine, because it will be the
1442 * same irq as is currently being serviced here, and Linux
1443 * won't allow another of the same (on any CPU) until we return.
1445 drive->service_time = jiffies - drive->service_start;
1446 if (startstop == ide_stopped) {
1447 if (hwgroup->handler == NULL) { /* paranoia */
1449 ide_do_request(hwgroup, hwif->irq);
1451 printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
1452 "on exit\n", drive->name);
1456 irq_ret = IRQ_HANDLED;
1458 spin_unlock_irqrestore(&ide_lock, flags);
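/*
 * ide_intr() is the handler passed to request_irq() for each hwgroup when
 * the port is brought up (see init_irq() in ide-probe.c); the hwgroup
 * pointer registered there is what comes back here as dev_id. The IRQ is
 * typically requested shared, which is why the unexpected/spurious cases
 * above are handled so defensively.
 */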
1463 * ide_do_drive_cmd - issue IDE special command
1464 * @drive: device to issue command
1465 * @rq: request to issue
1467 * This function issues a special IDE device request
1468 * onto the request queue.
1470 * The rq is queued at the head of the request queue, displacing
1471 * the currently-being-processed request, and this function
1472 * returns immediately without waiting for the new rq to be
1473 * completed. This is VERY DANGEROUS, and is intended for
1474 * careful use by the ATAPI tape/cdrom driver code.
1477 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
1479 ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
1480 unsigned long flags;
1484 spin_lock_irqsave(&ide_lock, flags);
1485 __elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
1486 blk_start_queueing(drive->queue);
1487 spin_unlock_irqrestore(&ide_lock, flags);
1490 EXPORT_SYMBOL(ide_do_drive_cmd);
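/*
 * Sketch of the intended use (hypothetical snippet, modelled on the ATAPI
 * drivers): a sense request is pushed in at the head so it runs before
 * anything else touches the device:
 *
 *	sense_rq->cmd_type = REQ_TYPE_SENSE;
 *	sense_rq->cmd_flags |= REQ_PREEMPT;
 *	ide_do_drive_cmd(drive, sense_rq);
 *
 * REQ_PREEMPT also lets the request past the IDE_DFLAG_BLOCKED check in
 * ide_do_request().
 */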
1492 void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
1494 ide_hwif_t *hwif = drive->hwif;
1497 memset(&task, 0, sizeof(task));
1498 task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
1499 IDE_TFLAG_OUT_FEATURE | tf_flags;
1500 task.tf.feature = dma; /* Use PIO/DMA */
1501 task.tf.lbam = bcount & 0xff;
1502 task.tf.lbah = (bcount >> 8) & 0xff;
1504 ide_tf_dump(drive->name, &task.tf);
1505 hwif->tp_ops->set_irq(hwif, 1);
1506 SELECT_MASK(drive, 0);
1507 hwif->tp_ops->tf_load(drive, &task);
1510 EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
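/*
 * Typical use (sketch of the ATAPI packet-command issue path, which lives
 * outside this file): program the byte count and the DMA indication before
 * sending the PACKET command:
 *
 *	ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_DEVICE, bcount, dma);
 *	hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
 */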
1512 void ide_pad_transfer(ide_drive_t *drive, int write, int len)
1514 ide_hwif_t *hwif = drive->hwif;
1519 hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
1521 hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
1525 EXPORT_SYMBOL_GPL(ide_pad_transfer);