X-Git-Url: http://pilppa.org/gitweb/?a=blobdiff_plain;f=drivers%2Fide%2Fide-io.c;h=28057747c1f8fcf91b34dfd2aac2a375c3ab09d9;hb=98339cbd360b77c3167db287fd611468c2c44559;hp=0fe89a599275de8923ad4a778a72a3ef46afed6b;hpb=9b79ed952bd7344d40152f8a560ad8a0d93f3886;p=linux-2.6-omap-h63xx.git

diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0fe89a59927..28057747c1f 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -295,54 +295,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
-void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
-{
-	ide_hwif_t *hwif = drive->hwif;
-	struct ide_taskfile *tf = &task->tf;
-
-	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
-		u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]);
-
-		tf->data = data & 0xff;
-		tf->hob_data = (data >> 8) & 0xff;
-	}
-
-	/* be sure we're looking at the low order bits */
-	hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]);
-
-	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
-		tf->nsect = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
-	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
-		tf->lbal = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
-	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
-		tf->lbam = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
-	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
-		tf->lbah = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
-	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
-		tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]);
-
-	if (task->tf_flags & IDE_TFLAG_LBA48) {
-		hwif->OUTB(drive->ctl | 0x80,
-			   hwif->io_ports[IDE_CONTROL_OFFSET]);
-
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
-			tf->hob_feature =
-				hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]);
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
-			tf->hob_nsect =
-				hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
-			tf->hob_lbal =
-				hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
-			tf->hob_lbam =
-				hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
-		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
-			tf->hob_lbah =
-				hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
-	}
-}
-
 /**
  *	ide_end_drive_cmd	-	end an explicit drive command
  *	@drive: command
@@ -378,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 			tf->error = err;
 			tf->status = stat;
 
-			ide_tf_read(drive, task);
+			drive->hwif->tf_read(drive, task);
 
 			if (task->tf_flags & IDE_TFLAG_DYN)
 				kfree(task);
@@ -406,31 +358,6 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 
 EXPORT_SYMBOL(ide_end_drive_cmd);
 
-/**
- *	try_to_flush_leftover_data	-	flush junk
- *	@drive: drive to flush
- *
- *	try_to_flush_leftover_data() is invoked in response to a drive
- *	unexpectedly having its DRQ_STAT bit set. As an alternative to
- *	resetting the drive, this routine tries to clear the condition
- *	by read a sector's worth of data from the drive. Of course,
- *	this may not help if the drive is *waiting* for data from *us*.
- */
-static void try_to_flush_leftover_data (ide_drive_t *drive)
-{
-	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
-
-	if (drive->media != ide_disk)
-		return;
-	while (i > 0) {
-		u32 buffer[16];
-		u32 wcount = (i > 16) ? 16 : i;
-
-		i -= wcount;
-		HWIF(drive)->ata_input_data(drive, buffer, wcount);
-	}
-}
-
 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
 	if (rq->rq_disk) {
@@ -454,7 +381,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
 	if (err == ABRT_ERR) {
 		if (drive->select.b.lba &&
 		    /* some newer drives don't support WIN_SPECIFY */
-		    hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) ==
+		    hwif->INB(hwif->io_ports.command_addr) ==
 		    WIN_SPECIFY)
 			return ide_stopped;
 	} else if ((err & BAD_CRC) == BAD_CRC) {
@@ -470,8 +397,11 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
 	}
 
 	if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ &&
-	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0)
-		try_to_flush_leftover_data(drive);
+	    (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
+		int nsect = drive->mult_count ? drive->mult_count : 1;
+
+		ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
+	}
 
 	if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
 		ide_kill_rq(drive, rq);
@@ -507,8 +437,8 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
 
 	if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
 		/* force an abort */
-		hwif->OUTB(WIN_IDLEIMMEDIATE,
-			   hwif->io_ports[IDE_COMMAND_OFFSET]);
+		hwif->OUTBSYNC(hwif, WIN_IDLEIMMEDIATE,
+			       hwif->io_ports.command_addr);
 
 	if (rq->errors >= ERROR_MAX) {
 		ide_kill_rq(drive, rq);
@@ -1421,7 +1351,7 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
 	 */
 	do {
 		if (hwif->irq == irq) {
-			stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+			stat = hwif->INB(hwif->io_ports.status_addr);
 			if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
 				/* Try to not flood the console with msgs */
 				static unsigned long last_msgtime, count;
@@ -1511,7 +1441,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 			 * Whack the status register, just in case
 			 * we have a leftover pending IRQ.
 			 */
-			(void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+			(void) hwif->INB(hwif->io_ports.status_addr);
#endif /* CONFIG_BLK_DEV_IDEPCI */
 		}
 		spin_unlock_irqrestore(&ide_lock, flags);
@@ -1586,90 +1516,31 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/**
- *	ide_init_drive_cmd	-	initialize a drive command request
- *	@rq: request object
- *
- *	Initialize a request before we fill it in and send it down to
- *	ide_do_drive_cmd. Commands must be set up by this function. Right
- *	now it doesn't do a lot, but if that changes abusers will have a
- *	nasty surprise.
- */
-
-void ide_init_drive_cmd (struct request *rq)
-{
-	memset(rq, 0, sizeof(*rq));
-	rq->ref_count = 1;
-}
-
-EXPORT_SYMBOL(ide_init_drive_cmd);
-
 /**
  *	ide_do_drive_cmd	-	issue IDE special command
  *	@drive: device to issue command
  *	@rq: request to issue
- *	@action: action for processing
  *
  *	This function issues a special IDE device request
  *	onto the request queue.
  *
- *	If action is ide_wait, then the rq is queued at the end of the
- *	request queue, and the function sleeps until it has been processed.
- *	This is for use when invoked from an ioctl handler.
- *
- *	If action is ide_preempt, then the rq is queued at the head of
- *	the request queue, displacing the currently-being-processed
- *	request and this function returns immediately without waiting
- *	for the new rq to be completed. This is VERY DANGEROUS, and is
- *	intended for careful use by the ATAPI tape/cdrom driver code.
- *
- *	If action is ide_end, then the rq is queued at the end of the
- *	request queue, and the function returns immediately without waiting
- *	for the new rq to be completed. This is again intended for careful
- *	use by the ATAPI tape/cdrom driver code.
+ *	the rq is queued at the head of the request queue, displacing
+ *	the currently-being-processed request and this function
+ *	returns immediately without waiting for the new rq to be
+ *	completed. This is VERY DANGEROUS, and is intended for
+ *	careful use by the ATAPI tape/cdrom driver code.
  */
-
-int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
+
+void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
-	DECLARE_COMPLETION_ONSTACK(wait);
-	int where = ELEVATOR_INSERT_BACK, err;
-	int must_wait = (action == ide_wait || action == ide_head_wait);
-
-	rq->errors = 0;
-
-	/*
-	 * we need to hold an extra reference to request for safe inspection
-	 * after completion
-	 */
-	if (must_wait) {
-		rq->ref_count++;
-		rq->end_io_data = &wait;
-		rq->end_io = blk_end_sync_rq;
-	}
 
 	spin_lock_irqsave(&ide_lock, flags);
-	if (action == ide_preempt)
-		hwgroup->rq = NULL;
-	if (action == ide_preempt || action == ide_head_wait) {
-		where = ELEVATOR_INSERT_FRONT;
-		rq->cmd_flags |= REQ_PREEMPT;
-	}
-	__elv_add_request(drive->queue, rq, where, 0);
-	ide_do_request(hwgroup, IDE_NO_IRQ);
+	hwgroup->rq = NULL;
+	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 1);
+	__generic_unplug_device(drive->queue);
 	spin_unlock_irqrestore(&ide_lock, flags);
-
-	err = 0;
-	if (must_wait) {
-		wait_for_completion(&wait);
-		if (rq->errors)
-			err = -EIO;
-
-		blk_put_request(rq);
-	}
-
-	return err;
 }
 
 EXPORT_SYMBOL(ide_do_drive_cmd);
@@ -1685,7 +1556,25 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
 	task.tf.lbam = bcount & 0xff;
 	task.tf.lbah = (bcount >> 8) & 0xff;
 
-	ide_tf_load(drive, &task);
+	ide_tf_dump(drive->name, &task.tf);
+	ide_set_irq(drive, 1);
+	SELECT_MASK(drive, 0);
+	drive->hwif->tf_load(drive, &task);
 }
 
 EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
+
+void ide_pad_transfer(ide_drive_t *drive, int write, int len)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	u8 buf[4] = { 0 };
+
+	while (len > 0) {
+		if (write)
+			hwif->output_data(drive, NULL, buf, min(4, len));
+		else
+			hwif->input_data(drive, NULL, buf, min(4, len));
+		len -= 4;
+	}
+}
+EXPORT_SYMBOL_GPL(ide_pad_transfer);
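For readers following the interface change above: ide_do_drive_cmd() no longer takes an ide_action_t and never waits; every request it queues now preempts whatever the drive is processing, and the call returns immediately. The fragment below is an illustrative sketch only, not part of this patch: the helper name issue_preempt_rq() is made up, it assumes the usual <linux/ide.h> and <linux/blkdev.h> context, and it only shows the calling convention implied by the new signature using block-layer calls of this kernel era (blk_get_request(), REQ_TYPE_SPECIAL). Callers that previously passed ide_wait are expected to wait for completion themselves, for example via blk_execute_rq() on drive->queue, which is outside the scope of this diff.

/*
 * Illustrative sketch, not part of this patch: how a driver might
 * queue a preempting special request with the new interface.  The
 * helper name is hypothetical.
 */
static void issue_preempt_rq(ide_drive_t *drive)
{
	struct request *rq;

	/* allocate a request on the drive's queue (may sleep) */
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);

	rq->cmd_type = REQ_TYPE_SPECIAL;	/* driver-private command */

	/*
	 * Queued at the head of the queue, displacing the in-flight
	 * request; returns before the new rq has completed.
	 */
	ide_do_drive_cmd(drive, rq);
}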