ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
                        ata_postreset_fn_t postreset)
 {
-       struct ata_host_set *host_set = ap->host_set;
        struct ata_eh_context *ehc = &ap->eh_context;
        struct ata_queued_cmd *qc;
        unsigned long flags;
                qc = NULL;
 
        /* reset PIO HSM and stop DMA engine */
-       spin_lock_irqsave(&host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        ap->hsm_task_state = HSM_ST_IDLE;
 
        ata_chk_status(ap);
        ap->ops->irq_clear(ap);
 
-       spin_unlock_irqrestore(&host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        if (thaw)
                ata_eh_thaw_port(ap);
 
 
        DPRINTK("ENTER\n");
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        DPRINTK("flush #1\n");
        flush_workqueue(ata_wq);
                flush_workqueue(ata_wq);
        }
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        DPRINTK("EXIT\n");
 }
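
The function above follows a set-flag / flush / clear-flag discipline: ATA_FLAG_FLUSH_PORT_TASK is raised under the port lock so that a running port task is guaranteed to see it and stop re-queueing itself, the workqueue is then flushed (a second time if cancel_delayed_work() could not cancel a queued task), and only afterwards is the flag cleared. A minimal userspace sketch of the same idea, using pthreads; the names (flush_requested, task_fn, task_armed) are illustrative, not libata API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool flush_requested;	/* stands in for ATA_FLAG_FLUSH_PORT_TASK */
static bool task_armed;

static void task_fn(void)
{
	pthread_mutex_lock(&lock);
	task_armed = false;
	if (!flush_requested)
		task_armed = true;	/* normal path: re-queue ourselves */
	pthread_mutex_unlock(&lock);
}

static void flush_task(void)
{
	pthread_mutex_lock(&lock);
	flush_requested = true;		/* a running task must observe this */
	pthread_mutex_unlock(&lock);

	task_fn();			/* stand-in for flush_workqueue() */

	pthread_mutex_lock(&lock);
	flush_requested = false;	/* safe: the task can no longer re-arm */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	task_armed = true;
	flush_task();
	printf("armed after flush: %d\n", task_armed);	/* prints 0 */
	return 0;
}
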
        unsigned int err_mask;
        int rc;
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        /* no internal command while frozen */
        if (ap->flags & ATA_FLAG_FROZEN) {
-               spin_unlock_irqrestore(&ap->host_set->lock, flags);
+               spin_unlock_irqrestore(ap->lock, flags);
                return AC_ERR_SYSTEM;
        }
 
 
        ata_qc_issue(qc);
 
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
 
        ata_port_flush_task(ap);
 
        if (!rc) {
-               spin_lock_irqsave(&ap->host_set->lock, flags);
+               spin_lock_irqsave(ap->lock, flags);
 
                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                                       "qc timeout (cmd 0x%x)\n", command);
                }
 
-               spin_unlock_irqrestore(&ap->host_set->lock, flags);
+               spin_unlock_irqrestore(ap->lock, flags);
        }
 
        /* do post_internal_cmd */
        }
 
        /* finish up */
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        *tf = qc->result_tf;
        err_mask = qc->err_mask;
                ata_port_probe(ap);
        }
 
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        return err_mask;
 }
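
ata_exec_internal() issues the command under the port lock, drops the lock, and waits with a timeout; on timeout it must re-take the lock and check whether the interrupt handler completed the qc in the meantime, which is exactly the race the "racing with irq" comment above describes. A compressed userspace model of that re-check, assuming POSIX condition variables; wait_internal() and done are stand-ins, and unlike wait_for_completion_timeout() this returns -1 on a genuine timeout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;			/* set by the "irq" side under lock */

static int wait_internal(int timeout_sec)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&lock);
	while (!done) {
		if (pthread_cond_timedwait(&cond, &lock, &ts)) {
			/* Timed out -- but re-check under the lock:
			 * completion may have raced with the timeout,
			 * in which case the command must not be failed. */
			if (done)
				break;
			pthread_mutex_unlock(&lock);
			return -1;	/* genuine timeout */
		}
	}
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("result: %d\n", wait_internal(1));	/* -1: nobody completes */
	return 0;
}
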
 
        if (ap->ops->error_handler) {
                if (in_wq) {
-                       spin_lock_irqsave(&ap->host_set->lock, flags);
+                       spin_lock_irqsave(ap->lock, flags);
 
-                       /* EH might have kicked in while host_set lock
+                       /* EH might have kicked in while the port lock
                         * is released.
                                        ata_port_freeze(ap);
                        }
 
-                       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                       spin_unlock_irqrestore(ap->lock, flags);
                } else {
                        if (likely(!(qc->err_mask & AC_ERR_HSM)))
                                ata_qc_complete(qc);
                }
        } else {
                if (in_wq) {
-                       spin_lock_irqsave(&ap->host_set->lock, flags);
+                       spin_lock_irqsave(ap->lock, flags);
                        ata_irq_on(ap);
                        ata_qc_complete(qc);
-                       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                       spin_unlock_irqrestore(ap->lock, flags);
                } else
                        ata_qc_complete(qc);
        }
                 * hsm_task_state is changed. Hence, the following locking.
                 */
                if (in_wq)
-                       spin_lock_irqsave(&ap->host_set->lock, flags);
+                       spin_lock_irqsave(ap->lock, flags);
 
                if (qc->tf.protocol == ATA_PROT_PIO) {
                        /* PIO data out protocol.
                        atapi_send_cdb(ap, qc);
 
                if (in_wq)
-                       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                       spin_unlock_irqrestore(ap->lock, flags);
 
                /* if polling, ata_pio_task() handles the rest.
                 * otherwise, interrupt handler takes over from here.
         * requests which occur asynchronously.  Synchronize using
-        * host_set lock.
+        * ap->lock.
         */
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        dev->flags &= ~ATA_DFLAG_INIT_MASK;
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
               sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
        host->unique_id = ata_unique_id++;
        host->max_cmd_len = 12;
 
+       ap->lock = &host_set->lock;
        ap->flags = ATA_FLAG_DISABLED;
        ap->id = host->unique_id;
        ap->host = host;
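
The hunk above is the heart of the change: struct ata_port gains a spinlock_t *lock which, for now, simply points at the shared host_set->lock (see the struct addition at the end of this patch). Every converted call site takes ap->lock instead of reaching through ap->host_set, so a later move to per-port locking only needs to touch this one initialization. A minimal userspace sketch of the same indirection, with pthread mutexes in place of kernel spinlocks; the host/port types here are illustrative, not the libata structures:

#include <pthread.h>
#include <stdio.h>

struct host {
	pthread_mutex_t lock;		/* one lock shared by all ports today */
};

struct port {
	pthread_mutex_t *lock;		/* alias; call sites only ever use this */
	int id;
};

static void port_init(struct port *p, struct host *h, int id)
{
	p->lock = &h->lock;		/* flipping this to a per-port lock
					 * later requires no caller changes */
	p->id = id;
}

static void port_do_work(struct port *p)
{
	pthread_mutex_lock(p->lock);	/* callers never name the host lock */
	printf("port %d working\n", p->id);
	pthread_mutex_unlock(p->lock);
}

int main(void)
{
	struct host h = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct port a, b;

	port_init(&a, &h, 0);
	port_init(&b, &h, 1);
	port_do_work(&a);
	port_do_work(&b);
	return 0;
}
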
                        ata_port_probe(ap);
 
                        /* kick EH for boot probing */
-                       spin_lock_irqsave(&ap->host_set->lock, flags);
+                       spin_lock_irqsave(ap->lock, flags);
 
                        ap->eh_info.probe_mask = (1 << ATA_MAX_DEVICES) - 1;
                        ap->eh_info.action |= ATA_EH_SOFTRESET;
                        ap->flags |= ATA_FLAG_LOADING;
                        ata_port_schedule_eh(ap);
 
-                       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                       spin_unlock_irqrestore(ap->lock, flags);
 
                        /* wait for EH to finish */
                        ata_port_wait_eh(ap);
                return;
 
        /* tell EH we're leaving & flush EH */
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        ap->flags |= ATA_FLAG_UNLOADING;
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        ata_port_wait_eh(ap);
 
        /* EH is now guaranteed to see UNLOADING, so no new device
         * will be attached.  Disable all existing devices.
         */
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ata_dev_disable(&ap->device[i]);
 
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        /* Final freeze & EH.  All in-flight commands are aborted.  EH
         * will be skipped and retrials will be terminated with bad
         * target.
         */
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        ata_port_freeze(ap);    /* won't be thawed */
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        ata_port_wait_eh(ap);
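
The detach sequence above depends on ordering: ATA_FLAG_UNLOADING is published under ap->lock, ata_port_wait_eh() guarantees any in-flight EH pass has completed (and so will see the flag on its next run), and only then are the devices disabled and the port frozen for good. A sketch of that publish-then-wait shape with pthreads; port_detach(), unloading and eh_in_progress are hypothetical names, and a real EH thread would clear eh_in_progress and broadcast eh_wait when it finishes:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t eh_wait = PTHREAD_COND_INITIALIZER;
static bool unloading, eh_in_progress;

static void port_detach(void)
{
	/* 1. publish the flag under the lock */
	pthread_mutex_lock(&lock);
	unloading = true;
	pthread_mutex_unlock(&lock);

	/* 2. wait for any running EH pass to finish; the next pass
	 *    is then guaranteed to observe 'unloading' */
	pthread_mutex_lock(&lock);
	while (eh_in_progress)
		pthread_cond_wait(&eh_wait, &lock);
	pthread_mutex_unlock(&lock);

	/* 3. now safe to tear down state no one else will touch */
}

int main(void)
{
	port_detach();
	return 0;
}
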
 
 
        }
 
        ret = EH_HANDLED;
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        qc = ata_qc_from_tag(ap, ap->active_tag);
        if (qc) {
                WARN_ON(qc->scsicmd != cmd);
                qc->err_mask |= AC_ERR_TIMEOUT;
                ret = EH_NOT_HANDLED;
        }
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
  out:
        DPRINTK("EXIT, ret=%d\n", ret);
 void ata_scsi_error(struct Scsi_Host *host)
 {
        struct ata_port *ap = ata_shost_to_port(host);
-       spinlock_t *hs_lock = &ap->host_set->lock;
+       spinlock_t *ap_lock = ap->lock;
        int i, repeat_cnt = ATA_EH_MAX_REPEAT;
        unsigned long flags;
 
                struct scsi_cmnd *scmd, *tmp;
                int nr_timedout = 0;
 
-               spin_lock_irqsave(hs_lock, flags);
+               spin_lock_irqsave(ap_lock, flags);
 
                list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
                        struct ata_queued_cmd *qc;
                if (nr_timedout)
                        __ata_port_freeze(ap);
 
-               spin_unlock_irqrestore(hs_lock, flags);
+               spin_unlock_irqrestore(ap_lock, flags);
        } else
-               spin_unlock_wait(hs_lock);
+               spin_unlock_wait(ap_lock);
 
  repeat:
        /* invoke error handler */
        if (ap->ops->error_handler) {
                /* fetch & clear EH info */
-               spin_lock_irqsave(hs_lock, flags);
+               spin_lock_irqsave(ap_lock, flags);
 
                memset(&ap->eh_context, 0, sizeof(ap->eh_context));
                ap->eh_context.i = ap->eh_info;
                ap->flags |= ATA_FLAG_EH_IN_PROGRESS;
                ap->flags &= ~ATA_FLAG_EH_PENDING;
 
-               spin_unlock_irqrestore(hs_lock, flags);
+               spin_unlock_irqrestore(ap_lock, flags);
 
                /* invoke EH.  if unloading, just finish failed qcs */
                if (!(ap->flags & ATA_FLAG_UNLOADING))
                 * recovered the port but before this point.  Repeat
                 * EH in such case.
                 */
-               spin_lock_irqsave(hs_lock, flags);
+               spin_lock_irqsave(ap_lock, flags);
 
                if (ap->flags & ATA_FLAG_EH_PENDING) {
                        if (--repeat_cnt) {
                                ata_port_printk(ap, KERN_INFO,
                                        "EH pending after completion, "
                                        "repeating EH (cnt=%d)\n", repeat_cnt);
-                               spin_unlock_irqrestore(hs_lock, flags);
+                               spin_unlock_irqrestore(ap_lock, flags);
                                goto repeat;
                        }
                        ata_port_printk(ap, KERN_ERR, "EH pending after %d "
                /* this run is complete, make sure EH info is clear */
                memset(&ap->eh_info, 0, sizeof(ap->eh_info));
 
-               /* Clear host_eh_scheduled while holding hs_lock such
+               /* Clear host_eh_scheduled while holding ap_lock such
                 * that if exception occurs after this point but
                 * before EH completion, SCSI midlayer will
                 * re-initiate EH.
                 */
                host->host_eh_scheduled = 0;
 
-               spin_unlock_irqrestore(hs_lock, flags);
+               spin_unlock_irqrestore(ap_lock, flags);
        } else {
                WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
                ap->ops->eng_timeout(ap);
        scsi_eh_flush_done_q(&ap->eh_done_q);
 
        /* clean up */
-       spin_lock_irqsave(hs_lock, flags);
+       spin_lock_irqsave(ap_lock, flags);
 
        if (ap->flags & ATA_FLAG_LOADING) {
                ap->flags &= ~ATA_FLAG_LOADING;
        ap->flags &= ~ATA_FLAG_EH_IN_PROGRESS;
        wake_up_all(&ap->eh_wait_q);
 
-       spin_unlock_irqrestore(hs_lock, flags);
+       spin_unlock_irqrestore(ap_lock, flags);
 
        DPRINTK("EXIT\n");
 }
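
ata_scsi_error() snapshots eh_info into eh_context and clears it under the lock, runs recovery unlocked, then re-takes the lock to check whether a new error was flagged mid-recovery, repeating up to ATA_EH_MAX_REPEAT times before giving up. The control flow reduced to a runnable sketch; pending and handle_errors() are stand-ins for the eh_info machinery:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int pending;		/* stands in for ap->eh_info */

static void handle_errors(unsigned int mask)
{
	printf("recovering mask 0x%x\n", mask);	/* recovery runs unlocked */
}

static void eh_main(void)
{
	int repeat_cnt = 3;		/* cf. ATA_EH_MAX_REPEAT */
	unsigned int snapshot;

repeat:
	pthread_mutex_lock(&lock);
	snapshot = pending;		/* fetch & clear under the lock */
	pending = 0;
	pthread_mutex_unlock(&lock);

	handle_errors(snapshot);

	pthread_mutex_lock(&lock);
	if (pending && --repeat_cnt) {	/* new error slipped in: repeat */
		pthread_mutex_unlock(&lock);
		goto repeat;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pending = 0x1;
	eh_main();
	return 0;
}
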
        DEFINE_WAIT(wait);
 
  retry:
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        while (ap->flags & (ATA_FLAG_EH_PENDING | ATA_FLAG_EH_IN_PROGRESS)) {
                prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
-               spin_unlock_irqrestore(&ap->host_set->lock, flags);
+               spin_unlock_irqrestore(ap->lock, flags);
                schedule();
-               spin_lock_irqsave(&ap->host_set->lock, flags);
+               spin_lock_irqsave(ap->lock, flags);
        }
        finish_wait(&ap->eh_wait_q, &wait);
 
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        /* make sure SCSI EH is complete */
        if (scsi_host_in_recovery(ap->host)) {
 static void ata_qc_timeout(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
-       struct ata_host_set *host_set = ap->host_set;
        u8 host_stat = 0, drv_stat;
        unsigned long flags;
 
 
        ap->hsm_task_state = HSM_ST_IDLE;
 
-       spin_lock_irqsave(&host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        switch (qc->tf.protocol) {
 
                break;
        }
 
-       spin_unlock_irqrestore(&host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        ata_eh_qc_complete(qc);
 
        if (!ap->ops->error_handler)
                return;
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        __ata_port_freeze(ap);
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
        if (!ap->ops->error_handler)
                return;
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        ap->flags &= ~ATA_FLAG_FROZEN;
 
        if (ap->ops->thaw)
                ap->ops->thaw(ap);
 
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        DPRINTK("ata%u port thawed\n", ap->id);
 }
        struct scsi_cmnd *scmd = qc->scsicmd;
        unsigned long flags;
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        qc->scsidone = ata_eh_scsidone;
        __ata_qc_complete(qc);
        WARN_ON(ata_tag_valid(qc->tag));
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
 }
 
        ata_dev_disable(dev);
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        dev->flags &= ~ATA_DFLAG_DETACH;
 
                ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
        }
 
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 }
 
 static void ata_eh_clear_action(struct ata_device *dev,
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        ata_eh_clear_action(dev, &ap->eh_info, action);
        ap->flags |= ATA_FLAG_RECOVERED;
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
                                break;
                        }
 
-                       spin_lock_irqsave(&ap->host_set->lock, flags);
+                       spin_lock_irqsave(ap->lock, flags);
                        ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
-                       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+                       spin_unlock_irqrestore(ap->lock, flags);
                }
        }
 
 
        if (!ap->ops->error_handler)
                return;
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
        dev = __ata_scsi_find_dev(ap, sdev);
        if (dev && dev->sdev) {
                /* SCSI device already in CANCEL state, no need to offline it */
                dev->flags |= ATA_DFLAG_DETACH;
                ata_port_schedule_eh(ap);
        }
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 }
 
 /**
        ap = ata_shost_to_port(shost);
 
        spin_unlock(shost->host_lock);
-       spin_lock(&ap->host_set->lock);
+       spin_lock(ap->lock);
 
        ata_scsi_dump_cdb(ap, cmd);
 
                done(cmd);
        }
 
-       spin_unlock(&ap->host_set->lock);
+       spin_unlock(ap->lock);
        spin_lock(shost->host_lock);
        return rc;
 }
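
ata_scsi_queuecmd() is entered with the SCSI host lock held; it drops that lock and takes ap->lock for the duration of issue, then reverses the order on return so the midlayer finds its lock held as expected. Because ap->lock still aliases host_set->lock at this point, the behaviour is unchanged; only the spelling is. A two-mutex model of the handoff, with illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t shost_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

/* called with shost_lock held; must return with it held */
static int queuecommand(void)
{
	pthread_mutex_unlock(&shost_lock);	/* drop the midlayer's lock... */
	pthread_mutex_lock(&port_lock);		/* ...take the port's lock */

	printf("issuing under the port lock\n");

	pthread_mutex_unlock(&port_lock);
	pthread_mutex_lock(&shost_lock);	/* restore caller's expectation */
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&shost_lock);
	queuecommand();
	pthread_mutex_unlock(&shost_lock);
	return 0;
}
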
         * increments reference counts regardless of device state.
         */
        mutex_lock(&ap->host->scan_mutex);
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
-       /* clearing dev->sdev is protected by host_set lock */
+       /* clearing dev->sdev is protected by ap->lock */
        sdev = dev->sdev;
                }
        }
 
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
        mutex_unlock(&ap->host->scan_mutex);
 
        if (sdev) {
                if (!(dev->flags & ATA_DFLAG_DETACHED))
                        continue;
 
-               spin_lock_irqsave(&ap->host_set->lock, flags);
+               spin_lock_irqsave(ap->lock, flags);
                dev->flags &= ~ATA_DFLAG_DETACHED;
-               spin_unlock_irqrestore(&ap->host_set->lock, flags);
+               spin_unlock_irqrestore(ap->lock, flags);
 
                ata_scsi_remove_dev(dev);
        }
            (lun != SCAN_WILD_CARD && lun != 0))
                return -EINVAL;
 
-       spin_lock_irqsave(&ap->host_set->lock, flags);
+       spin_lock_irqsave(ap->lock, flags);
 
        if (id == SCAN_WILD_CARD) {
                ap->eh_info.probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
        if (rc == 0)
                ata_port_schedule_eh(ap);
 
-       spin_unlock_irqrestore(&ap->host_set->lock, flags);
+       spin_unlock_irqrestore(ap->lock, flags);
 
        return rc;
 }
 
 struct ata_port {
        struct Scsi_Host        *host;  /* our co-allocated scsi host */
        const struct ata_port_operations *ops;
+       spinlock_t              *lock;
        unsigned long           flags;  /* ATA_FLAG_xxx */
        unsigned int            id;     /* unique id req'd by scsi midlyr */
        unsigned int            port_no; /* unique port #; from zero */