*
  *      Returns SUCCESS if command aborted else FAILED
  *
- *      Locks: struct Scsi_Host::host_lock held (with irqsave) on entry 
- *      and assumed to be held on return.
+ *      Locks: None held
  *
  *      Calling context: kernel thread
  *
 
        MPT_FRAME_HDR   *mf;
        u32              ctx2abort;
        int              scpnt_idx;
-       spinlock_t      *host_lock = SCpnt->device->host->host_lock;
 
        /* If we can't locate our host adapter structure, return FAILED status.
         */
 
        hd->abortSCpnt = SCpnt;
 
-       spin_unlock_irq(host_lock);
        if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
                SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
                ctx2abort, 2 /* 2 second timeout */)
                hd->tmPending = 0;
                hd->tmState = TM_STATE_NONE;
 
-               spin_lock_irq(host_lock);
-
                /* Unmap the DMA buffers, if any. */
                if (SCpnt->use_sg) {
                        pci_unmap_sg(ioc->pcidev, (struct scatterlist *) SCpnt->request_buffer,
                mpt_free_msg_frame(ioc, mf);
                return FAILED;
        }
-       spin_lock_irq(host_lock);
        return SUCCESS;
 }
 
 
  *             FAILED  - otherwise
  */
 int
-zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+__zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 {
        int retval = SUCCESS;
        struct zfcp_fsf_req *new_fsf_req, *old_fsf_req;
        return retval;
 }
 
+int
+zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+{
+       int rc;
+       struct Scsi_Host *scsi_host = scpnt->device->host;
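+
+       /* the midlayer no longer calls the abort handler with the host
+        * lock held, so take it here around the original handler body
+        */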
+       spin_lock_irq(scsi_host->host_lock);
+       rc = __zfcp_scsi_eh_abort_handler(scpnt);
+       spin_unlock_irq(scsi_host->host_lock);
+       return rc;
+}
+
 /*
  * function:   zfcp_scsi_eh_device_reset_handler
  *
 
         */
        cmd->scsi_done = scsi_done;
 
-       ahd_midlayer_entrypoint_lock(ahd, &flags);
+       ahd_lock(ahd, &flags);
 
        /*
         * Close the race of a command that was in the process of
                ahd_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
                ahd_linux_queue_cmd_complete(ahd, cmd);
                ahd_schedule_completeq(ahd);
-               ahd_midlayer_entrypoint_unlock(ahd, &flags);
+               ahd_unlock(ahd, &flags);
                return (0);
        }
        dev = ahd_linux_get_device(ahd, cmd->device->channel,
                ahd_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
                ahd_linux_queue_cmd_complete(ahd, cmd);
                ahd_schedule_completeq(ahd);
-               ahd_midlayer_entrypoint_unlock(ahd, &flags);
+               ahd_unlock(ahd, &flags);
                printf("%s: aic79xx_linux_queue - Unable to allocate device!\n",
                       ahd_name(ahd));
                return (0);
                dev->flags |= AHD_DEV_ON_RUN_LIST;
                ahd_linux_run_device_queues(ahd);
        }
-       ahd_midlayer_entrypoint_unlock(ahd, &flags);
+       ahd_unlock(ahd, &flags);
        return (0);
 }
 
 
                printf(" 0x%x", cmd->cmnd[cdb_byte]);
        printf("\n");
 
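+       /* hold the driver lock while we search for the command below;
+        * the midlayer no longer takes any lock around eh_abort
+        */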
+       spin_lock_irq(&ahc->platform_data->spin_lock);
+
        /*
         * First determine if we currently own this command.
         * Start by searching the device queue.  If not found
                }
                spin_lock_irq(&ahc->platform_data->spin_lock);
        }
+
+       spin_unlock_irq(&ahc->platform_data->spin_lock);
        return (retval);
 }
 
 
  *   Abort the current SCSI command(s).
  *-F*************************************************************************/
 static int
-aic7xxx_abort(Scsi_Cmnd *cmd)
+__aic7xxx_abort(Scsi_Cmnd *cmd)
 {
   struct aic7xxx_scb  *scb = NULL;
   struct aic7xxx_host *p;
   return SUCCESS;
 }
 
+static int
+aic7xxx_abort(Scsi_Cmnd *cmd)
+{
+       int rc;
+
+       spin_lock_irq(cmd->device->host->host_lock);
+       rc = __aic7xxx_abort(cmd);
+       spin_unlock_irq(cmd->device->host->host_lock);
+
+       return rc;
+}
+
+
 /*+F*************************************************************************
  * Function:
  *   aic7xxx_reset
 
        return 0;
 }
 
-static int ibmmca_abort(Scsi_Cmnd * cmd)
+static int __ibmmca_abort(Scsi_Cmnd * cmd)
 {
        /* Abort does not work, as the adapter never generates an interrupt on
         * whatever situation is simulated, even when really pending commands
        }
 }
 
+static int ibmmca_abort(Scsi_Cmnd * cmd)
+{
+       struct Scsi_Host *shpnt = cmd->device->host;
+       int rc;
+
+       spin_lock_irq(shpnt->host_lock);
+       rc = __ibmmca_abort(cmd);
+       spin_unlock_irq(shpnt->host_lock);
+
+       return rc;
+}
+
 static int ibmmca_host_reset(Scsi_Cmnd * cmd)
 {
        struct Scsi_Host *shpnt;
 
                return FAILED;
        }
 
-       spin_unlock_irq(hostdata->host->host_lock);
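+       /* the host lock is not held here any more, so it is safe to sleep */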
        wait_for_completion(&evt->comp);
-       spin_lock_irq(hostdata->host->host_lock);
 
        /* make sure we got a good response */
        if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
 
        return SUCCESS;
 }
 
-static int in2000_abort(Scsi_Cmnd * cmd)
+static int __in2000_abort(Scsi_Cmnd * cmd)
 {
        struct Scsi_Host *instance;
        struct IN2000_hostdata *hostdata;
        return SUCCESS;
 }
 
+static int in2000_abort(Scsi_Cmnd * cmd)
+{
+       int rc;
+
+       spin_lock_irq(cmd->device->host->host_lock);
+       rc = __in2000_abort(cmd);
+       spin_unlock_irq(cmd->device->host->host_lock);
+
+       return rc;
+}
 
 
 #define MAX_IN2000_HOSTS 3
 
        ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
        res = scsi_cmd->device->hostdata;
 
+       /* If we are currently going through reset/reload, return failed.
+        * This will force the mid-layer to call ipr_eh_host_reset,
+        * which will then go to sleep and wait for the reset to complete
+        */
+       if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
+               return FAILED;
        if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
                return FAILED;
 
  **/
 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
 {
-       struct ipr_ioa_cfg *ioa_cfg;
+       unsigned long flags;
+       int rc;
 
        ENTER;
-       ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
 
-       /* If we are currently going through reset/reload, return failed. This will force the
-          mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
-          reset to complete */
-       if (ioa_cfg->in_reset_reload)
-               return FAILED;
-       if (ioa_cfg->ioa_is_dead)
-               return FAILED;
-       if (!scsi_cmd->device->hostdata)
-               return FAILED;
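+       /* ipr_cancel_op() is still called with the host lock held */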
+       spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
+       rc = ipr_cancel_op(scsi_cmd);
+       spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
 
        LEAVE;
-       return ipr_cancel_op(scsi_cmd);
+       return rc;
 }
 
 /**
 
        ips_ha_t *ha;
        ips_copp_wait_item_t *item;
        int ret;
+       unsigned long cpu_flags;
+       struct Scsi_Host *host;
 
        METHOD_TRACE("ips_eh_abort", 1);
 
        if (!SC)
                return (FAILED);
 
+       host = SC->device->host;
        ha = (ips_ha_t *) SC->device->host->hostdata;
 
        if (!ha)
        if (!ha->active)
                return (FAILED);
 
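+       /* take the host lock before walking the copp waitlist below */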
+       IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+
        /* See if the command is on the copp queue */
        item = ha->copp_waitlist.head;
        while ((item) && (item->scsi_cmd != SC))
                /* command must have already been sent */
                ret = (FAILED);
        }
+
+       IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
        return ret;
 }
 
 
 }
 
 static int
-lpfc_abort_handler(struct scsi_cmnd *cmnd)
+__lpfc_abort_handler(struct scsi_cmnd *cmnd)
 {
        struct lpfc_hba *phba =
                        (struct lpfc_hba *)cmnd->device->host->hostdata[0];
        return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
 }
 
+static int
+lpfc_abort_handler(struct scsi_cmnd *cmnd)
+{
+       int rc;
+       spin_lock_irq(cmnd->device->host->host_lock);
+       rc = __lpfc_abort_handler(cmnd);
+       spin_unlock_irq(cmnd->device->host->host_lock);
+       return rc;
+}
+
 static int
 lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 {
 
  * aborted. All the commands issued to the F/W must complete.
  **/
 static int
-megaraid_abort_handler(struct scsi_cmnd *scp)
+__megaraid_abort_handler(struct scsi_cmnd *scp)
 {
        adapter_t               *adapter;
        mraid_device_t          *raid_dev;
        return FAILED;
 }
 
+static int
+megaraid_abort_handler(struct scsi_cmnd *scp)
+{
+       adapter_t       *adapter;
+       int rc;
+
+       adapter         = SCP2ADAPTER(scp);
+
+       spin_lock_irq(adapter->host_lock);
+       rc = __megaraid_abort_handler(scp);
+       spin_unlock_irq(adapter->host_lock);
+
+       return rc;
+}
+
 
 /**
  * megaraid_reset_handler - device reset handler for mailbox based driver
 
 static int
 qla1280_eh_abort(struct scsi_cmnd * cmd)
 {
-       return qla1280_error_action(cmd, ABORT_COMMAND);
+       int rc;
+
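+       /* qla1280_error_action() still runs under the host lock, as it
+        * did when the midlayer held it across eh_abort
+        */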
+       spin_lock_irq(cmd->device->host->host_lock);
+       rc = qla1280_error_action(cmd, ABORT_COMMAND);
+       spin_unlock_irq(cmd->device->host->host_lock);
+
+       return rc;
 }
 
 /**************************************************************************
 
        serial = cmd->serial_number;
 
        /* Check active list for the command. */
-       spin_unlock_irq(ha->host->host_lock);
        spin_lock(&ha->hardware_lock);
        for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
                sp = ha->outstanding_cmds[i];
                }
                spin_lock(&ha->hardware_lock);
        }
-       spin_lock_irq(ha->host->host_lock);
 
        qla_printk(KERN_INFO, ha, 
            "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no,
 
                 * abort a timed out command or not.  not sure how
                 * we should treat them differently anyways.
                 */
-               spin_lock_irqsave(shost->host_lock, flags);
                if (shost->hostt->eh_abort_handler)
                        shost->hostt->eh_abort_handler(scmd);
-               spin_unlock_irqrestore(shost->host_lock, flags);
                        
                scmd->request->rq_status = RQ_SCSI_DONE;
                scmd->owner = SCSI_OWNER_ERROR_HANDLER;
  **/
 static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd)
 {
-       unsigned long flags;
-       int rtn = FAILED;
-
        if (!scmd->device->host->hostt->eh_abort_handler)
-               return rtn;
+               return FAILED;
 
        /*
         * scsi_done was called just after the command timed out and before
 
        scmd->owner = SCSI_OWNER_LOWLEVEL;
 
-       spin_lock_irqsave(scmd->device->host->host_lock, flags);
-       rtn = scmd->device->host->hostt->eh_abort_handler(scmd);
-       spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
-
-       return rtn;
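+       /* drivers that need the host lock now take it in their own
+        * ->eh_abort_handler
+        */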
+       return scmd->device->host->hostt->eh_abort_handler(scmd);
 }
 
 /**
 
  */
 static int sym53c8xx_eh_abort_handler(struct scsi_cmnd *cmd)
 {
-       return sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
+       int rc;
+
+       spin_lock_irq(cmd->device->host->host_lock);
+       rc = sym_eh_handler(SYM_EH_ABORT, "ABORT", cmd);
+       spin_unlock_irq(cmd->device->host->host_lock);
+
+       return rc;
 }
 
 static int sym53c8xx_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
        ogm_addr = (unsigned int)isa_bus_to_virt(inl(port0 + 23));
        icm_status = inb(port0 + 27);
        icm_addr = (unsigned int)isa_bus_to_virt(inl(port0 + 28));
-       spin_lock_irqsave(host->host_lock, flags);
+       spin_unlock_irqrestore(host->host_lock, flags);
       }
 
     /* First check to see if an interrupt is pending.  I suspect the SiS
 
                set_bit(US_FLIDX_ABORTING, &us->flags);
                usb_stor_stop_transport(us);
        }
-       scsi_unlock(us_to_host(us));
 
        /* Wait for the aborted command to finish */
        wait_for_completion(&us->notify);
 
        /* Reacquire the lock and allow USB transfers to resume */
-       scsi_lock(us_to_host(us));
        clear_bit(US_FLIDX_ABORTING, &us->flags);
        clear_bit(US_FLIDX_TIMED_OUT, &us->flags);
        return SUCCESS;