vhost->discovery_threads--;
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+       del_timer(&tgt->timer);
 
        switch (status) {
        case IBMVFC_MAD_SUCCESS:
        mad->iu.rsp.len = sizeof(mad->fc_iu.response);
 }
 
+/**
+ * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
+ * @evt:               ibmvfc event struct
+ *
+ * Just cleanup this event struct. Everything else is handled by
+ * the ADISC completion handler. If the ADISC never actually comes
+ * back, we still have the timer running on the ADISC event struct
+ * which will fire and cause the CRQ to get reset.
+ *
+ **/
+static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_host *vhost = evt->vhost;
+       struct ibmvfc_target *tgt = evt->tgt;
+
+       tgt_dbg(tgt, "ADISC cancel complete\n");
+       /* Release the abort slot claimed in ibmvfc_adisc_timeout */
+       vhost->abort_threads--;
+       ibmvfc_free_event(evt);
+       /* Drop the target reference taken when the cancel was sent */
+       kref_put(&tgt->kref, ibmvfc_release_tgt);
+       /* Kick the work thread in case discovery was waiting on us */
+       wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_adisc_timeout - Handle an ADISC timeout
+ * @tgt:               ibmvfc target struct
+ *
+ * If an ADISC times out, send a cancel. If the cancel times
+ * out, reset the CRQ. When the ADISC comes back as cancelled,
+ * log back into the target.
+ **/
+static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
+{
+       struct ibmvfc_host *vhost = tgt->vhost;
+       struct ibmvfc_event *evt;
+       struct ibmvfc_tmf *tmf;
+       unsigned long flags;
+       int rc;
+
+       tgt_dbg(tgt, "ADISC timeout\n");
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       /*
+        * Bail out if we have already saturated the allowed number of
+        * outstanding aborts, if the target is no longer waiting on the
+        * ADISC, or if the host has moved on from target discovery --
+        * in all of those cases the cancel would be pointless or racy.
+        */
+       if (vhost->abort_threads >= disc_threads ||
+           tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
+           vhost->state != IBMVFC_INITIALIZING ||
+           vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               return;
+       }
+
+       /* Both of these are undone by ibmvfc_tgt_adisc_cancel_done */
+       vhost->abort_threads++;
+       kref_get(&tgt->kref);
+       evt = ibmvfc_get_event(vhost);
+       ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
+
+       evt->tgt = tgt;
+       tmf = &evt->iu.tmf;
+       memset(tmf, 0, sizeof(*tmf));
+       tmf->common.version = 1;
+       tmf->common.opcode = IBMVFC_TMF_MAD;
+       tmf->common.length = sizeof(*tmf);
+       tmf->scsi_id = tgt->scsi_id;
+       /* Matches the cancel_key sent with the ADISC passthru MAD */
+       tmf->cancel_key = tgt->cancel_key;
+
+       rc = ibmvfc_send_event(evt, vhost, default_timeout);
+
+       if (rc) {
+               tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
+               /* Undo the counter/reference ourselves: the completion
+                * handler will never run for an event that was not sent. */
+               vhost->abort_threads--;
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
+               /* The timed-out ADISC is still outstanding and we cannot
+                * cancel it, so reset the CRQ to clean up. */
+               __ibmvfc_reset_host(vhost);
+       } else
+               tgt_dbg(tgt, "Attempting to cancel ADISC\n");
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+}
+
 /**
  * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
  * @tgt:               ibmvfc target struct
  *
+ * When sending an ADISC we end up with two timers running. The
+ * first timer is the timer in the ibmvfc target struct. If this
+ * fires, we send a cancel to the target. The second timer is the
+ * timer on the ibmvfc event for the ADISC, which is longer. If that
+ * fires, it means the ADISC timed out and our attempt to cancel it
+ * also failed, so we need to reset the CRQ.
  **/
 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
 {
        mad = &evt->iu.passthru;
        mad->iu.flags = IBMVFC_FC_ELS;
        mad->iu.scsi_id = tgt->scsi_id;
+       mad->iu.cancel_key = tgt->cancel_key;
 
        mad->fc_iu.payload[0] = IBMVFC_ADISC;
        memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
               sizeof(vhost->login_buf->resp.node_name));
        mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
 
+       if (timer_pending(&tgt->timer))
+               mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
+       else {
+               tgt->timer.data = (unsigned long) tgt;
+               tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
+               tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
+               add_timer(&tgt->timer);
+       }
+
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
-       if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+       if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
                vhost->discovery_threads--;
+               del_timer(&tgt->timer);
                ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
                kref_put(&tgt->kref, ibmvfc_release_tgt);
        } else
        tgt->new_scsi_id = scsi_id;
        tgt->vhost = vhost;
        tgt->need_login = 1;
+       tgt->cancel_key = vhost->task_set++;
+       init_timer(&tgt->timer);
        kref_init(&tgt->kref);
        ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
        spin_lock_irqsave(vhost->host->host_lock, flags);
                                spin_unlock_irqrestore(vhost->host->host_lock, flags);
                                if (rport)
                                        fc_remote_port_delete(rport);
+                               del_timer_sync(&tgt->timer);
                                kref_put(&tgt->kref, ibmvfc_release_tgt);
                                return;
                        }
        vhost->dev = dev;
        vhost->partition_number = -1;
        vhost->log_level = log_level;
+       vhost->task_set = 1;
        strcpy(vhost->partition_name, "UNKNOWN");
        init_waitqueue_head(&vhost->work_wait_q);
        init_waitqueue_head(&vhost->init_wait_q);