diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 7cb68feb04fd11eb18c9a3e0d487ffe5a3b270a6..a98d11bf35767dbf5e3b6df9b8b5bc483616a597 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -153,11 +153,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
         * count until this queued work is done
         */
        evtp->evt_arg1  = lpfc_nlp_get(ndlp);
-       evtp->evt       = LPFC_EVT_DEV_LOSS;
-       list_add_tail(&evtp->evt_listp, &phba->work_list);
-       if (phba->work_wait)
-               wake_up(phba->work_wait);
-
+       if (evtp->evt_arg1) {
+               evtp->evt = LPFC_EVT_DEV_LOSS;
+               list_add_tail(&evtp->evt_listp, &phba->work_list);
+               lpfc_worker_wake_up(phba);
+       }
        spin_unlock_irq(&phba->hbalock);
 
        return;
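
The hunk above only queues dev-loss work when a node reference was actually obtained. A sketch of the assumed contract of lpfc_nlp_get(), which is defined elsewhere in the driver and is not part of this diff; the kref field name follows the rest of the lpfc code:

/* Assumed: returns the node with a reference held, or NULL when no
 * reference could be taken, in which case no dev-loss event may be
 * queued against it. */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
        if (ndlp)
                kref_get(&ndlp->kref);  /* pin the node for the worker */
        return ndlp;    /* the real driver may also return NULL for
                           nodes that are no longer active */
}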
@@ -276,14 +276,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 }
 
-
-void
-lpfc_worker_wake_up(struct lpfc_hba *phba)
-{
-       wake_up(phba->work_wait);
-       return;
-}
-
 static void
 lpfc_work_list_done(struct lpfc_hba *phba)
 {
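
The open-coded helper is removed here because the wake-up mechanism changes: the worker now sleeps on a waitqueue embedded in struct lpfc_hba and is signalled through an atomic flag. A sketch of what the reworked lpfc_worker_wake_up(), used by the hunks below but defined outside this diff, presumably looks like:

void
lpfc_worker_wake_up(struct lpfc_hba *phba)
{
        /* Mark data pending first so the worker's wait condition sees
         * it, then wake the waitqueue embedded in the HBA structure. */
        set_bit(LPFC_DATA_READY, &phba->data_flags);
        wake_up(&phba->work_waitq);
}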
@@ -429,6 +421,8 @@ lpfc_work_done(struct lpfc_hba *phba)
                || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
+                       /* Set the lpfc data pending flag */
+                       set_bit(LPFC_DATA_READY, &phba->data_flags);
                } else {
                        pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                        lpfc_sli_handle_slow_ring_event(phba, pring,
@@ -459,69 +453,29 @@ lpfc_work_done(struct lpfc_hba *phba)
        lpfc_work_list_done(phba);
 }
 
-static int
-check_work_wait_done(struct lpfc_hba *phba)
-{
-       struct lpfc_vport *vport;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-       int rc = 0;
-
-       spin_lock_irq(&phba->hbalock);
-       list_for_each_entry(vport, &phba->port_list, listentry) {
-               if (vport->work_port_events) {
-                       rc = 1;
-                       break;
-               }
-       }
-       if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
-           kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
-               rc = 1;
-               phba->work_found++;
-       } else
-               phba->work_found = 0;
-       spin_unlock_irq(&phba->hbalock);
-       return rc;
-}
-
-
 int
 lpfc_do_work(void *p)
 {
        struct lpfc_hba *phba = p;
        int rc;
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
 
        set_user_nice(current, -20);
-       phba->work_wait = &work_waitq;
-       phba->work_found = 0;
+       phba->data_flags = 0;
 
        while (1) {
-
-               rc = wait_event_interruptible(work_waitq,
-                                             check_work_wait_done(phba));
-
+               /* Sleep until work is posted or a stop is requested */
+               rc = wait_event_interruptible(phba->work_waitq,
+                                       (test_and_clear_bit(LPFC_DATA_READY,
+                                                           &phba->data_flags)
+                                        || kthread_should_stop()));
                BUG_ON(rc);
 
                if (kthread_should_stop())
                        break;
 
+               /* Attend to pending lpfc data processing */
                lpfc_work_done(phba);
-
-               /* If there is alot of slow ring work, like during link up
-                * check_work_wait_done() may cause this thread to not give
-                * up the CPU for very long periods of time. This may cause
-                * soft lockups or other problems. To avoid these situations
-                * give up the CPU here after LPFC_MAX_WORKER_ITERATION
-                * consecutive iterations.
-                */
-               if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
-                       phba->work_found = 0;
-                       schedule();
-               }
        }
-       spin_lock_irq(&phba->hbalock);
-       phba->work_wait = NULL;
-       spin_unlock_irq(&phba->hbalock);
        return 0;
 }
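
With the waitqueue no longer on this function's stack, lpfc_do_work() has nothing to tear down on exit. A hedged sketch of the thread lifecycle, which lives in the driver's attach/detach paths rather than in this diff (worker_thread and brd_no are field names assumed from the rest of the driver); note that kthread_stop() both sets the stop flag and wakes the task, so the wait_event_interruptible() above re-evaluates its condition and the loop exits cleanly:

/* attach: start the worker thread */
phba->worker_thread = kthread_run(lpfc_do_work, phba,
                                  "lpfc_worker_%d", phba->brd_no);
if (IS_ERR(phba->worker_thread))
        return PTR_ERR(phba->worker_thread);

/* detach: wakes the kthread and waits for lpfc_do_work() to return 0 */
kthread_stop(phba->worker_thread);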
 
@@ -551,10 +505,10 @@ lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
 
        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
-       if (phba->work_wait)
-               lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, flags);
 
+       lpfc_worker_wake_up(phba);
+
        return 1;
 }
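
Note the wake-up moves outside the spinlock: because the waitqueue is now embedded in struct lpfc_hba it always exists, so posters no longer need hbalock to test the old phba->work_wait pointer, and dropping the lock before wake_up() keeps the critical section short. The assumed companion initialization, somewhere in the attach path before the worker starts:

init_waitqueue_head(&phba->work_waitq);  /* replaces on-stack work_wait */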
 
@@ -963,6 +917,10 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
        if (phba->fc_topology == TOPOLOGY_LOOP) {
                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
 
+               if (phba->cfg_enable_npiv)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+                               "1309 Link Up Event npiv not supported in loop "
+                               "topology\n");
                                /* Get Loop Map information */
                if (la->il)
                        vport->fc_flag |= FC_LBIT;
@@ -1087,6 +1045,8 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
+       /* Unblock ELS traffic */
+       phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
        /* Check for error */
        if (mb->mbxStatus) {
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
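
Clearing LPFC_STOP_IOCB_EVENT here pairs with the deferral added to lpfc_work_done() above: while the READ_LA mailbox was outstanding, slow-ring events were parked as LPFC_DEFERRED_RING_EVENT with LPFC_DATA_READY set. A hedged sketch of the intended flow; the wake placement is an assumption, not shown in this hunk:

/* READ_LA completed: unblock ELS traffic ... */
phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
/* ... and kick the worker so any deferred slow-ring event is drained */
lpfc_worker_wake_up(phba);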
@@ -1650,7 +1610,6 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                ndlp->nlp_DID, old_state, state);
 
        if (old_state == NLP_STE_NPR_NODE &&
-           (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
            state != NLP_STE_NPR_NODE)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (old_state == NLP_STE_UNMAPPED_NODE) {
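
This hunk and the similar ones below all drop the same caller-side NLP_DELAY_TMO test. The guard has presumably moved into lpfc_cancel_retry_delay_tmo() itself (defined in lpfc_els.c, outside this diff), which makes the unconditional calls safe; a sketch of the assumed shape, with the bulk of the cleanup elided:

void
lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport,
                            struct lpfc_nodelist *nlp)
{
        if (!(nlp->nlp_flag & NLP_DELAY_TMO))
                return;         /* nothing pending: cheap no-op */
        /* ... clear NLP_DELAY_TMO, del_timer_sync(&nlp->nlp_delayfunc)
         * and flush any queued ELS retry event ... */
}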
@@ -1687,8 +1646,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-       if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        spin_lock_irq(shost->host_lock);
@@ -1701,8 +1659,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 static void
 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
-       if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
@@ -2121,10 +2078,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        ndlp->nlp_last_elscmd = 0;
        del_timer_sync(&ndlp->nlp_delayfunc);
 
-       if (!list_empty(&ndlp->els_retry_evt.evt_listp))
-               list_del_init(&ndlp->els_retry_evt.evt_listp);
-       if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
-               list_del_init(&ndlp->dev_loss_evt.evt_listp);
+       list_del_init(&ndlp->els_retry_evt.evt_listp);
+       list_del_init(&ndlp->dev_loss_evt.evt_listp);
 
        lpfc_unreg_rpi(vport, ndlp);
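
The dropped list_empty() guards are redundant because list_del_init() is already safe on an initialized but unlinked entry; deleting an entry that points at itself just relinks it to itself. For reference, the kernel's definition from include/linux/list.h:

static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);
        INIT_LIST_HEAD(entry);
}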
 
@@ -2144,10 +2099,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        LPFC_MBOXQ_t *mbox;
        int rc;
 
-       if (ndlp->nlp_flag & NLP_DELAY_TMO) {
-               lpfc_cancel_retry_delay_tmo(vport, ndlp);
-       }
-
+       lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
                /* For this case we need to cleanup the default rpi
                 * allocated by the firmware.
@@ -2317,8 +2269,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
                        /* Since this node is marked for discovery,
                         * delay timeout is not needed.
                         */
-                       if (ndlp->nlp_flag & NLP_DELAY_TMO)
-                               lpfc_cancel_retry_delay_tmo(vport, ndlp);
+                       lpfc_cancel_retry_delay_tmo(vport, ndlp);
                } else
                        ndlp = NULL;
        } else {
@@ -2643,21 +2594,20 @@ lpfc_disc_timeout(unsigned long ptr)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
        struct lpfc_hba   *phba = vport->phba;
+       uint32_t tmo_posted;
        unsigned long flags = 0;
 
        if (unlikely(!phba))
                return;
 
-       if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
-               spin_lock_irqsave(&vport->work_port_lock, flags);
+       spin_lock_irqsave(&vport->work_port_lock, flags);
+       tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
+       if (!tmo_posted)
                vport->work_port_events |= WORKER_DISC_TMO;
-               spin_unlock_irqrestore(&vport->work_port_lock, flags);
+       spin_unlock_irqrestore(&vport->work_port_lock, flags);
 
-               spin_lock_irqsave(&phba->hbalock, flags);
-               if (phba->work_wait)
-                       lpfc_worker_wake_up(phba);
-               spin_unlock_irqrestore(&phba->hbalock, flags);
-       }
+       if (!tmo_posted)
+               lpfc_worker_wake_up(phba);
        return;
 }
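
The timer handler now tests and posts WORKER_DISC_TMO atomically under work_port_lock, closing the window in which two firings could both queue the work, and it wakes the worker only after the lock is released. A hedged sketch of the assumed consumer side in lpfc_work_done() (not part of this diff), which consumes the posted bits under the same lock and thereby makes the tmo_posted test race-free:

uint32_t work_port_events;

spin_lock_irq(&vport->work_port_lock);
work_port_events = vport->work_port_events;
vport->work_port_events &= ~work_port_events;   /* consume posted events */
spin_unlock_irq(&vport->work_port_lock);

if (work_port_events & WORKER_DISC_TMO)
        lpfc_disc_timeout_handler(vport);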