1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_disc.h"
35 #include "lpfc_scsi.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
40 /* AlpaArray for assignment of scsi ids for scan-down and bind_method */
41 static uint8_t lpfcAlpaArray[] = {
42 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
43 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
44 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
45 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
46 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
47 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
48 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
49 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
50 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
51 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
52 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
53 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
54 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
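/*
 * Note: the table above lists the 126 valid arbitrated-loop physical
 * addresses (AL_PAs) ordered from 0xEF down to 0x01; scan-down node
 * binding walks it front to back, i.e. in descending AL_PA order.
 */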
57 static void lpfc_disc_timeout_handler(struct lpfc_hba *);
60 lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
62 uint8_t *name = (uint8_t *)&ndlp->nlp_portname;
65 spin_lock_irq(phba->host->host_lock);
66 if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
67 spin_unlock_irq(phba->host->host_lock);
72 * If a discovery event re-added nodev_timer after the timer
73 * firing and before processing the timer, cancel the
76 spin_unlock_irq(phba->host->host_lock);
77 del_timer_sync(&ndlp->nlp_tmofunc);
78 spin_lock_irq(phba->host->host_lock);
80 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
82 if (ndlp->nlp_sid != NLP_NO_SID) {
84 /* flush the target */
85 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
86 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
88 spin_unlock_irq(phba->host->host_lock);
91 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
92 "%d:0203 Nodev timeout on "
93 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
94 "NPort x%x Data: x%x x%x x%x\n",
96 *name, *(name+1), *(name+2), *(name+3),
97 *(name+4), *(name+5), *(name+6), *(name+7),
98 ndlp->nlp_DID, ndlp->nlp_flag,
99 ndlp->nlp_state, ndlp->nlp_rpi);
101 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
102 "%d:0204 Nodev timeout on "
103 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
104 "NPort x%x Data: x%x x%x x%x\n",
106 *name, *(name+1), *(name+2), *(name+3),
107 *(name+4), *(name+5), *(name+6), *(name+7),
108 ndlp->nlp_DID, ndlp->nlp_flag,
109 ndlp->nlp_state, ndlp->nlp_rpi);
112 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
117 lpfc_work_list_done(struct lpfc_hba * phba)
119 struct lpfc_work_evt *evtp = NULL;
120 struct lpfc_nodelist *ndlp;
123 spin_lock_irq(phba->host->host_lock);
124 while(!list_empty(&phba->work_list)) {
125 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
127 spin_unlock_irq(phba->host->host_lock);
130 case LPFC_EVT_NODEV_TMO:
131 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
132 lpfc_process_nodev_timeout(phba, ndlp);
135 case LPFC_EVT_ELS_RETRY:
136 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
137 lpfc_els_retry_delay_handler(ndlp);
140 case LPFC_EVT_ONLINE:
141 if (phba->hba_state < LPFC_LINK_DOWN)
142 *(int *)(evtp->evt_arg1) = lpfc_online(phba);
144 *(int *)(evtp->evt_arg1) = 0;
145 complete((struct completion *)(evtp->evt_arg2));
147 case LPFC_EVT_OFFLINE:
148 if (phba->hba_state >= LPFC_LINK_DOWN)
150 lpfc_sli_brdrestart(phba);
151 *(int *)(evtp->evt_arg1) =
152 lpfc_sli_brdready(phba,HS_FFRDY | HS_MBRDY);
153 complete((struct completion *)(evtp->evt_arg2));
155 case LPFC_EVT_WARM_START:
156 if (phba->hba_state >= LPFC_LINK_DOWN)
158 lpfc_reset_barrier(phba);
159 lpfc_sli_brdreset(phba);
160 lpfc_hba_down_post(phba);
161 *(int *)(evtp->evt_arg1) =
162 lpfc_sli_brdready(phba, HS_MBRDY);
163 complete((struct completion *)(evtp->evt_arg2));
166 if (phba->hba_state >= LPFC_LINK_DOWN)
168 *(int *)(evtp->evt_arg1)
169 = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba);
170 complete((struct completion *)(evtp->evt_arg2));
175 spin_lock_irq(phba->host->host_lock);
177 spin_unlock_irq(phba->host->host_lock);
182 lpfc_work_done(struct lpfc_hba * phba)
184 struct lpfc_sli_ring *pring;
188 uint32_t work_hba_events;
190 spin_lock_irq(phba->host->host_lock);
191 ha_copy = phba->work_ha;
193 work_hba_events=phba->work_hba_events;
194 spin_unlock_irq(phba->host->host_lock);
196 if (ha_copy & HA_ERATT)
197 lpfc_handle_eratt(phba);
199 if (ha_copy & HA_MBATT)
200 lpfc_sli_handle_mb_event(phba);
202 if (ha_copy & HA_LATT)
203 lpfc_handle_latt(phba);
205 if (work_hba_events & WORKER_DISC_TMO)
206 lpfc_disc_timeout_handler(phba);
208 if (work_hba_events & WORKER_ELS_TMO)
209 lpfc_els_timeout_handler(phba);
211 if (work_hba_events & WORKER_MBOX_TMO)
212 lpfc_mbox_timeout_handler(phba);
214 if (work_hba_events & WORKER_FDMI_TMO)
215 lpfc_fdmi_tmo_handler(phba);
217 spin_lock_irq(phba->host->host_lock);
218 phba->work_hba_events &= ~work_hba_events;
219 spin_unlock_irq(phba->host->host_lock);
221 for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
222 pring = &phba->sli.ring[i];
223 if ((ha_copy & HA_RXATT)
224 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
225 if (pring->flag & LPFC_STOP_IOCB_MASK) {
226 pring->flag |= LPFC_DEFERRED_RING_EVENT;
228 lpfc_sli_handle_slow_ring_event(phba, pring,
231 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
234 * Turn on Ring interrupts
236 spin_lock_irq(phba->host->host_lock);
237 control = readl(phba->HCregaddr);
238 control |= (HC_R0INT_ENA << i);
239 writel(control, phba->HCregaddr);
240 readl(phba->HCregaddr); /* flush */
241 spin_unlock_irq(phba->host->host_lock);
245 lpfc_work_list_done (phba);
250 check_work_wait_done(struct lpfc_hba *phba) {
252 spin_lock_irq(phba->host->host_lock);
254 phba->work_hba_events ||
255 (!list_empty(&phba->work_list)) ||
256 kthread_should_stop()) {
257 spin_unlock_irq(phba->host->host_lock);
260 spin_unlock_irq(phba->host->host_lock);
266 lpfc_do_work(void *p)
268 struct lpfc_hba *phba = p;
270 DECLARE_WAIT_QUEUE_HEAD(work_waitq);
272 set_user_nice(current, -20);
273 phba->work_wait = &work_waitq;
277 rc = wait_event_interruptible(work_waitq,
278 check_work_wait_done(phba));
281 if (kthread_should_stop())
284 lpfc_work_done(phba);
287 phba->work_wait = NULL;
292 * This is only called to handle FC worker events. Since this is a rare
293 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
294 * embedding it in the IOCB.
297 lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
300 struct lpfc_work_evt *evtp;
303 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
304 * be queued to worker thread for processing
306 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
310 evtp->evt_arg1 = arg1;
311 evtp->evt_arg2 = arg2;
314 list_add_tail(&evtp->evt_listp, &phba->work_list);
315 spin_lock_irq(phba->host->host_lock);
317 wake_up(phba->work_wait);
318 spin_unlock_irq(phba->host->host_lock);
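/*
 * Usage sketch (illustrative, not driver code): a caller that needs a
 * synchronous answer posts an event whose two arguments are a return
 * code slot and a completion, then sleeps until lpfc_work_list_done()
 * runs the event on the worker thread:
 *
 *	DECLARE_COMPLETION(done);
 *	int status = 0;
 *
 *	lpfc_workq_post_event(phba, &status, &done, LPFC_EVT_ONLINE);
 *	wait_for_completion(&done);
 *
 * On return, status holds the lpfc_online() result stored by the
 * LPFC_EVT_ONLINE case above.
 */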
324 lpfc_linkdown(struct lpfc_hba * phba)
326 struct lpfc_sli *psli;
327 struct lpfc_nodelist *ndlp, *next_ndlp;
328 struct list_head *listp, *node_list[7];
333 /* sysfs or selective reset may call this routine to clean up */
334 if (phba->hba_state >= LPFC_LINK_DOWN) {
335 if (phba->hba_state == LPFC_LINK_DOWN)
338 spin_lock_irq(phba->host->host_lock);
339 phba->hba_state = LPFC_LINK_DOWN;
340 spin_unlock_irq(phba->host->host_lock);
343 /* Clean up any firmware default rpi's */
344 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
345 lpfc_unreg_did(phba, 0xffffffff, mb);
346 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
347 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
348 == MBX_NOT_FINISHED) {
349 mempool_free( mb, phba->mbox_mem_pool);
353 /* Cleanup any outstanding RSCN activity */
354 lpfc_els_flush_rscn(phba);
356 /* Cleanup any outstanding ELS commands */
357 lpfc_els_flush_cmd(phba);
359 /* Issue a LINK DOWN event to all nodes */
360 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
361 node_list[1] = &phba->fc_nlpmap_list;
362 node_list[2] = &phba->fc_nlpunmap_list;
363 node_list[3] = &phba->fc_prli_list;
364 node_list[4] = &phba->fc_reglogin_list;
365 node_list[5] = &phba->fc_adisc_list;
366 node_list[6] = &phba->fc_plogi_list;
367 for (i = 0; i < 7; i++) {
368 listp = node_list[i];
369 if (list_empty(listp))
372 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
374 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
375 NLP_EVT_DEVICE_RECOVERY);
377 /* Check config parameter use-adisc or FCP-2 */
378 if ((rc != NLP_STE_FREED_NODE) &&
379 (phba->cfg_use_adisc == 0) &&
380 !(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) {
381 /* We know we will have to relogin, so
382 * unreglogin the rpi right now to fail
383 * any outstanding I/Os quickly.
385 lpfc_unreg_rpi(phba, ndlp);
390 /* free any ndlp's on unused list */
391 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
393 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
396 /* Setup myDID for link up if we are in pt2pt mode */
397 if (phba->fc_flag & FC_PT2PT) {
399 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
400 lpfc_config_link(phba, mb);
401 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
402 if (lpfc_sli_issue_mbox
403 (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
404 == MBX_NOT_FINISHED) {
405 mempool_free( mb, phba->mbox_mem_pool);
408 spin_lock_irq(phba->host->host_lock);
409 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
410 spin_unlock_irq(phba->host->host_lock);
412 spin_lock_irq(phba->host->host_lock);
413 phba->fc_flag &= ~FC_LBIT;
414 spin_unlock_irq(phba->host->host_lock);
416 /* Turn off discovery timer if it's running */
417 lpfc_can_disctmo(phba);
419 /* Must process IOCBs on all rings to handle ABORTed I/Os */
424 lpfc_linkup(struct lpfc_hba * phba)
426 struct lpfc_nodelist *ndlp, *next_ndlp;
427 struct list_head *listp, *node_list[7];
430 spin_lock_irq(phba->host->host_lock);
431 phba->hba_state = LPFC_LINK_UP;
432 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
433 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
434 phba->fc_flag |= FC_NDISC_ACTIVE;
435 phba->fc_ns_retry = 0;
436 spin_unlock_irq(phba->host->host_lock);
439 node_list[0] = &phba->fc_plogi_list;
440 node_list[1] = &phba->fc_adisc_list;
441 node_list[2] = &phba->fc_reglogin_list;
442 node_list[3] = &phba->fc_prli_list;
443 node_list[4] = &phba->fc_nlpunmap_list;
444 node_list[5] = &phba->fc_nlpmap_list;
445 node_list[6] = &phba->fc_npr_list;
446 for (i = 0; i < 7; i++) {
447 listp = node_list[i];
448 if (list_empty(listp))
451 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
452 if (phba->fc_flag & FC_LBIT) {
453 if (ndlp->nlp_type & NLP_FABRIC) {
454 /* On Linkup its safe to clean up the
455 * ndlp from Fabric connections.
457 lpfc_nlp_list(phba, ndlp,
459 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
460 /* Fail outstanding IO now since device
461 * is marked for PLOGI.
463 lpfc_unreg_rpi(phba, ndlp);
469 /* free any ndlp's on unused list */
470 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
472 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
479 * This routine handles processing a CLEAR_LA mailbox
480 * command upon completion. It is setup in the LPFC_MBOXQ
481 * as the completion routine when the command is
482 * handed off to the SLI layer.
485 lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
487 struct lpfc_sli *psli;
493 /* Since we don't do discovery right now, turn these off here */
494 psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
495 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
496 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
498 /* Check for error */
499 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
500 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
501 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
502 "%d:0320 CLEAR_LA mbxStatus error x%x hba "
504 phba->brd_no, mb->mbxStatus, phba->hba_state);
506 phba->hba_state = LPFC_HBA_ERROR;
510 if (phba->fc_flag & FC_ABORT_DISCOVERY)
513 phba->num_disc_nodes = 0;
514 /* go thru NPR list and issue ELS PLOGIs */
515 if (phba->fc_npr_cnt) {
516 lpfc_els_disc_plogi(phba);
519 if (!phba->num_disc_nodes) {
520 spin_lock_irq(phba->host->host_lock);
521 phba->fc_flag &= ~FC_NDISC_ACTIVE;
522 spin_unlock_irq(phba->host->host_lock);
525 phba->hba_state = LPFC_HBA_READY;
528 /* Device Discovery completes */
529 lpfc_printf_log(phba,
532 "%d:0225 Device Discovery completes\n",
535 mempool_free( pmb, phba->mbox_mem_pool);
537 spin_lock_irq(phba->host->host_lock);
538 phba->fc_flag &= ~FC_ABORT_DISCOVERY;
539 if (phba->fc_flag & FC_ESTABLISH_LINK) {
540 phba->fc_flag &= ~FC_ESTABLISH_LINK;
542 spin_unlock_irq(phba->host->host_lock);
544 del_timer_sync(&phba->fc_estabtmo);
546 lpfc_can_disctmo(phba);
548 /* turn on Link Attention interrupts */
549 spin_lock_irq(phba->host->host_lock);
550 psli->sli_flag |= LPFC_PROCESS_LA;
551 control = readl(phba->HCregaddr);
552 control |= HC_LAINT_ENA;
553 writel(control, phba->HCregaddr);
554 readl(phba->HCregaddr); /* flush */
555 spin_unlock_irq(phba->host->host_lock);
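/*
 * The CLEAR_LA submission pattern used throughout this file, shown as
 * a minimal sketch: allocate the mailbox, let lpfc_clear_la() build
 * the command, point mbox_cmpl at the handler above, and hand it to
 * the SLI layer. On MBX_NOT_FINISHED the caller still owns the
 * mailbox and must free it:
 *
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (mbox) {
 *		phba->hba_state = LPFC_CLEAR_LA;
 *		lpfc_clear_la(phba, mbox);
 *		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
 *		if (lpfc_sli_issue_mbox(phba, mbox,
 *				(MBX_NOWAIT | MBX_STOP_IOCB))
 *		    == MBX_NOT_FINISHED)
 *			mempool_free(mbox, phba->mbox_mem_pool);
 *	}
 */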
561 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
563 struct lpfc_sli *psli = &phba->sli;
566 if (pmb->mb.mbxStatus)
569 mempool_free(pmb, phba->mbox_mem_pool);
571 if (phba->fc_topology == TOPOLOGY_LOOP &&
572 phba->fc_flag & FC_PUBLIC_LOOP &&
573 !(phba->fc_flag & FC_LBIT)) {
574 /* Need to wait for FAN - use discovery timer
575 * for timeout. hba_state is identically
576 * LPFC_LOCAL_CFG_LINK while waiting for FAN
578 lpfc_set_disctmo(phba);
582 /* Start discovery by sending a FLOGI. hba_state is identically
583 * LPFC_FLOGI while waiting for FLOGI cmpl
585 phba->hba_state = LPFC_FLOGI;
586 lpfc_set_disctmo(phba);
587 lpfc_initial_flogi(phba);
591 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
592 "%d:0306 CONFIG_LINK mbxStatus error x%x "
594 phba->brd_no, pmb->mb.mbxStatus, phba->hba_state);
598 phba->hba_state = LPFC_HBA_ERROR;
600 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
601 "%d:0200 CONFIG_LINK bad hba state x%x\n",
602 phba->brd_no, phba->hba_state);
604 lpfc_clear_la(phba, pmb);
605 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
606 rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
607 if (rc == MBX_NOT_FINISHED) {
608 mempool_free(pmb, phba->mbox_mem_pool);
609 lpfc_disc_flush_list(phba);
610 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
611 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
612 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
613 phba->hba_state = LPFC_HBA_READY;
619 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
621 struct lpfc_sli *psli = &phba->sli;
622 MAILBOX_t *mb = &pmb->mb;
623 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
626 /* Check for error */
628 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
629 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
630 "%d:0319 READ_SPARAM mbxStatus error x%x "
632 phba->brd_no, mb->mbxStatus, phba->hba_state);
635 phba->hba_state = LPFC_HBA_ERROR;
639 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
640 sizeof (struct serv_parm));
641 memcpy((uint8_t *) & phba->fc_nodename,
642 (uint8_t *) & phba->fc_sparam.nodeName,
643 sizeof (struct lpfc_name));
644 memcpy((uint8_t *) & phba->fc_portname,
645 (uint8_t *) & phba->fc_sparam.portName,
646 sizeof (struct lpfc_name));
647 lpfc_mbuf_free(phba, mp->virt, mp->phys);
649 mempool_free( pmb, phba->mbox_mem_pool);
653 pmb->context1 = NULL;
654 lpfc_mbuf_free(phba, mp->virt, mp->phys);
656 if (phba->hba_state != LPFC_CLEAR_LA) {
657 lpfc_clear_la(phba, pmb);
658 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
659 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
660 == MBX_NOT_FINISHED) {
661 mempool_free( pmb, phba->mbox_mem_pool);
662 lpfc_disc_flush_list(phba);
663 psli->ring[(psli->ip_ring)].flag &=
664 ~LPFC_STOP_IOCB_EVENT;
665 psli->ring[(psli->fcp_ring)].flag &=
666 ~LPFC_STOP_IOCB_EVENT;
667 psli->ring[(psli->next_ring)].flag &=
668 ~LPFC_STOP_IOCB_EVENT;
669 phba->hba_state = LPFC_HBA_READY;
672 mempool_free( pmb, phba->mbox_mem_pool);
678 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
681 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
682 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
683 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
685 spin_lock_irq(phba->host->host_lock);
686 switch (la->UlnkSpeed) {
688 phba->fc_linkspeed = LA_1GHZ_LINK;
691 phba->fc_linkspeed = LA_2GHZ_LINK;
694 phba->fc_linkspeed = LA_4GHZ_LINK;
697 phba->fc_linkspeed = LA_UNKNW_LINK;
701 phba->fc_topology = la->topology;
703 if (phba->fc_topology == TOPOLOGY_LOOP) {
704 /* Get Loop Map information */
707 phba->fc_flag |= FC_LBIT;
709 phba->fc_myDID = la->granted_AL_PA;
710 i = la->un.lilpBde64.tus.f.bdeSize;
713 phba->alpa_map[0] = 0;
715 if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
726 numalpa = phba->alpa_map[0];
728 while (j < numalpa) {
729 memset(un.pamap, 0, 16);
730 for (k = 1; j < numalpa; k++) {
732 phba->alpa_map[j + 1];
737 /* Link Up Event ALPA map */
738 lpfc_printf_log(phba,
741 "%d:1304 Link Up Event "
742 "ALPA map Data: x%x "
745 un.pa.wd1, un.pa.wd2,
746 un.pa.wd3, un.pa.wd4);
751 phba->fc_myDID = phba->fc_pref_DID;
752 phba->fc_flag |= FC_LBIT;
754 spin_unlock_irq(phba->host->host_lock);
758 lpfc_read_sparam(phba, sparam_mbox);
759 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
760 lpfc_sli_issue_mbox(phba, sparam_mbox,
761 (MBX_NOWAIT | MBX_STOP_IOCB));
765 phba->hba_state = LPFC_LOCAL_CFG_LINK;
766 lpfc_config_link(phba, cfglink_mbox);
767 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
768 lpfc_sli_issue_mbox(phba, cfglink_mbox,
769 (MBX_NOWAIT | MBX_STOP_IOCB));
774 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
776 struct lpfc_sli *psli = &phba->sli;
780 /* turn on Link Attention interrupts - no CLEAR_LA needed */
781 spin_lock_irq(phba->host->host_lock);
782 psli->sli_flag |= LPFC_PROCESS_LA;
783 control = readl(phba->HCregaddr);
784 control |= HC_LAINT_ENA;
785 writel(control, phba->HCregaddr);
786 readl(phba->HCregaddr); /* flush */
787 spin_unlock_irq(phba->host->host_lock);
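/*
 * Note: the read-modify-write of HCregaddr above (set HC_LAINT_ENA,
 * write it back, then read once more to flush the posted PCI write)
 * is the same interrupt re-enable sequence used in
 * lpfc_mbx_cmpl_clear_la() and, per ring with HC_R0INT_ENA << ringno,
 * in lpfc_work_done().
 */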
791 * This routine handles processing a READ_LA mailbox
792 * command upon completion. It is setup in the LPFC_MBOXQ
793 * as the completion routine when the command is
794 * handed off to the SLI layer.
797 lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
800 MAILBOX_t *mb = &pmb->mb;
801 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
803 /* Check for error */
805 lpfc_printf_log(phba,
808 "%d:1307 READ_LA mbox error x%x state x%x\n",
810 mb->mbxStatus, phba->hba_state);
811 lpfc_mbx_issue_link_down(phba);
812 phba->hba_state = LPFC_HBA_ERROR;
813 goto lpfc_mbx_cmpl_read_la_free_mbuf;
816 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
818 memcpy(&phba->alpa_map[0], mp->virt, 128);
820 spin_lock_irq(phba->host->host_lock);
822 phba->fc_flag |= FC_BYPASSED_MODE;
824 phba->fc_flag &= ~FC_BYPASSED_MODE;
825 spin_unlock_irq(phba->host->host_lock);
827 if (((phba->fc_eventTag + 1) < la->eventTag) ||
828 (phba->fc_eventTag == la->eventTag)) {
829 phba->fc_stat.LinkMultiEvent++;
830 if (la->attType == AT_LINK_UP) {
831 if (phba->fc_eventTag != 0)
836 phba->fc_eventTag = la->eventTag;
838 if (la->attType == AT_LINK_UP) {
839 phba->fc_stat.LinkUp++;
840 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
841 "%d:1303 Link Up Event x%x received "
842 "Data: x%x x%x x%x x%x\n",
843 phba->brd_no, la->eventTag, phba->fc_eventTag,
844 la->granted_AL_PA, la->UlnkSpeed,
846 lpfc_mbx_process_link_up(phba, la);
848 phba->fc_stat.LinkDown++;
849 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
850 "%d:1305 Link Down Event x%x received "
851 "Data: x%x x%x x%x\n",
852 phba->brd_no, la->eventTag, phba->fc_eventTag,
853 phba->hba_state, phba->fc_flag);
854 lpfc_mbx_issue_link_down(phba);
857 lpfc_mbx_cmpl_read_la_free_mbuf:
858 lpfc_mbuf_free(phba, mp->virt, mp->phys);
860 mempool_free(pmb, phba->mbox_mem_pool);
865 * This routine handles processing a REG_LOGIN mailbox
866 * command upon completion. It is setup in the LPFC_MBOXQ
867 * as the completion routine when the command is
868 * handed off to the SLI layer.
871 lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
873 struct lpfc_sli *psli;
875 struct lpfc_dmabuf *mp;
876 struct lpfc_nodelist *ndlp;
881 ndlp = (struct lpfc_nodelist *) pmb->context2;
882 mp = (struct lpfc_dmabuf *) (pmb->context1);
884 pmb->context1 = NULL;
886 /* Good status, call state machine */
887 lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
888 lpfc_mbuf_free(phba, mp->virt, mp->phys);
890 mempool_free( pmb, phba->mbox_mem_pool);
896 * This routine handles processing a Fabric REG_LOGIN mailbox
897 * command upon completion. It is setup in the LPFC_MBOXQ
898 * as the completion routine when the command is
899 * handed off to the SLI layer.
902 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
904 struct lpfc_sli *psli;
906 struct lpfc_dmabuf *mp;
907 struct lpfc_nodelist *ndlp;
908 struct lpfc_nodelist *ndlp_fdmi;
914 ndlp = (struct lpfc_nodelist *) pmb->context2;
915 mp = (struct lpfc_dmabuf *) (pmb->context1);
918 lpfc_mbuf_free(phba, mp->virt, mp->phys);
920 mempool_free( pmb, phba->mbox_mem_pool);
921 mempool_free( ndlp, phba->nlp_mem_pool);
923 /* FLOGI failed, so just use loop map to make discovery list */
924 lpfc_disc_list_loopmap(phba);
926 /* Start discovery */
927 lpfc_disc_start(phba);
931 pmb->context1 = NULL;
933 ndlp->nlp_rpi = mb->un.varWords[0];
934 ndlp->nlp_type |= NLP_FABRIC;
935 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
936 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
938 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
939 /* This NPort has been assigned an NPort_ID by the fabric as a
940 * result of the completed fabric login. Issue a State Change
941 * Registration (SCR) ELS request to the fabric controller
942 * (SCR_DID) so that this NPort gets RSCN events from the
945 lpfc_issue_els_scr(phba, SCR_DID, 0);
947 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
949 /* Allocate a new node instance. If the pool is empty,
950 * start the discovery process and skip the Nameserver
951 * login process. This is attempted again later on.
952 * Otherwise, issue a Port Login (PLOGI) to NameServer.
954 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
956 lpfc_disc_start(phba);
957 lpfc_mbuf_free(phba, mp->virt, mp->phys);
959 mempool_free( pmb, phba->mbox_mem_pool);
962 lpfc_nlp_init(phba, ndlp, NameServer_DID);
963 ndlp->nlp_type |= NLP_FABRIC;
966 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
967 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
968 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
969 if (phba->cfg_fdmi_on) {
970 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
973 lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
974 ndlp_fdmi->nlp_type |= NLP_FABRIC;
975 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
976 lpfc_issue_els_plogi(phba, FDMI_DID, 0);
981 lpfc_mbuf_free(phba, mp->virt, mp->phys);
983 mempool_free( pmb, phba->mbox_mem_pool);
988 * This routine handles processing a NameServer REG_LOGIN mailbox
989 * command upon completion. It is setup in the LPFC_MBOXQ
990 * as the completion routine when the command is
991 * handed off to the SLI layer.
994 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
996 struct lpfc_sli *psli;
998 struct lpfc_dmabuf *mp;
999 struct lpfc_nodelist *ndlp;
1004 ndlp = (struct lpfc_nodelist *) pmb->context2;
1005 mp = (struct lpfc_dmabuf *) (pmb->context1);
1007 if (mb->mbxStatus) {
1008 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1010 mempool_free( pmb, phba->mbox_mem_pool);
1011 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1013 /* RegLogin failed, so just use loop map to make discovery
1015 lpfc_disc_list_loopmap(phba);
1017 /* Start discovery */
1018 lpfc_disc_start(phba);
1022 pmb->context1 = NULL;
1024 ndlp->nlp_rpi = mb->un.varWords[0];
1025 ndlp->nlp_type |= NLP_FABRIC;
1026 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1027 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1029 if (phba->hba_state < LPFC_HBA_READY) {
1030 /* Link up discovery requires Fabric registration. */
1031 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
1032 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
1033 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
1036 phba->fc_ns_retry = 0;
1037 /* Good status, issue CT Request to NameServer */
1038 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
1039 /* Cannot issue NameServer Query, so finish up discovery */
1040 lpfc_disc_start(phba);
1043 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1045 mempool_free( pmb, phba->mbox_mem_pool);
1051 lpfc_register_remote_port(struct lpfc_hba * phba,
1052 struct lpfc_nodelist * ndlp)
1054 struct fc_rport *rport;
1055 struct lpfc_rport_data *rdata;
1056 struct fc_rport_identifiers rport_ids;
1058 /* Remote port has reappeared. Re-register w/ FC transport */
1059 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1060 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1061 rport_ids.port_id = ndlp->nlp_DID;
1062 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1064 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
1066 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1067 "Warning: fc_remote_port_add failed\n");
1071 /* initialize static port data */
1072 rport->maxframe_size = ndlp->nlp_maxframe;
1073 rport->supported_classes = ndlp->nlp_class_sup;
1074 if ((rport->scsi_target_id != -1) &&
1075 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1076 ndlp->nlp_sid = rport->scsi_target_id;
1078 rdata = rport->dd_data;
1079 rdata->pnode = ndlp;
1081 if (ndlp->nlp_type & NLP_FCP_TARGET)
1082 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1083 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1084 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1087 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1088 fc_remote_port_rolechg(rport, rport_ids.roles);
1095 lpfc_unregister_remote_port(struct lpfc_hba * phba,
1096 struct lpfc_nodelist * ndlp)
1098 struct fc_rport *rport = ndlp->rport;
1099 struct lpfc_rport_data *rdata = rport->dd_data;
1102 rdata->pnode = NULL;
1103 fc_remote_port_delete(rport);
1109 lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1111 enum { none, unmapped, mapped } rport_add = none, rport_del = none;
1112 struct lpfc_sli *psli;
1115 /* Sanity check to ensure we are not moving to / from the same list */
1116 if ((nlp->nlp_flag & NLP_LIST_MASK) == list)
1117 if (list != NLP_NO_LIST)
1120 spin_lock_irq(phba->host->host_lock);
1121 switch (nlp->nlp_flag & NLP_LIST_MASK) {
1122 case NLP_NO_LIST: /* Not on any list */
1124 case NLP_UNUSED_LIST:
1125 phba->fc_unused_cnt--;
1126 list_del(&nlp->nlp_listp);
1128 case NLP_PLOGI_LIST:
1129 phba->fc_plogi_cnt--;
1130 list_del(&nlp->nlp_listp);
1132 case NLP_ADISC_LIST:
1133 phba->fc_adisc_cnt--;
1134 list_del(&nlp->nlp_listp);
1136 case NLP_REGLOGIN_LIST:
1137 phba->fc_reglogin_cnt--;
1138 list_del(&nlp->nlp_listp);
1141 phba->fc_prli_cnt--;
1142 list_del(&nlp->nlp_listp);
1144 case NLP_UNMAPPED_LIST:
1145 phba->fc_unmap_cnt--;
1146 list_del(&nlp->nlp_listp);
1147 nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1148 nlp->nlp_type &= ~NLP_FC_NODE;
1149 phba->nport_event_cnt++;
1151 rport_del = unmapped;
1153 case NLP_MAPPED_LIST:
1155 list_del(&nlp->nlp_listp);
1156 phba->nport_event_cnt++;
1162 list_del(&nlp->nlp_listp);
1163 /* Stop delay tmo if taking node off NPR list */
1164 if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
1165 (list != NLP_NPR_LIST)) {
1166 spin_unlock_irq(phba->host->host_lock);
1167 lpfc_cancel_retry_delay_tmo(phba, nlp);
1168 spin_lock_irq(phba->host->host_lock);
1173 nlp->nlp_flag &= ~NLP_LIST_MASK;
1175 /* Add NPort <did> to <num> list */
1176 lpfc_printf_log(phba,
1179 "%d:0904 Add NPort x%x to %d list Data: x%x\n",
1181 nlp->nlp_DID, list, nlp->nlp_flag);
1184 case NLP_NO_LIST: /* No list, just remove it */
1185 spin_unlock_irq(phba->host->host_lock);
1186 lpfc_nlp_remove(phba, nlp);
1187 spin_lock_irq(phba->host->host_lock);
1188 /* as node removed - stop further transport calls */
1191 case NLP_UNUSED_LIST:
1192 nlp->nlp_flag |= list;
1193 /* Put it at the end of the unused list */
1194 list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
1195 phba->fc_unused_cnt++;
1197 case NLP_PLOGI_LIST:
1198 nlp->nlp_flag |= list;
1199 /* Put it at the end of the plogi list */
1200 list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
1201 phba->fc_plogi_cnt++;
1203 case NLP_ADISC_LIST:
1204 nlp->nlp_flag |= list;
1205 /* Put it at the end of the adisc list */
1206 list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
1207 phba->fc_adisc_cnt++;
1209 case NLP_REGLOGIN_LIST:
1210 nlp->nlp_flag |= list;
1211 /* Put it at the end of the reglogin list */
1212 list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
1213 phba->fc_reglogin_cnt++;
1216 nlp->nlp_flag |= list;
1217 /* Put it at the end of the prli list */
1218 list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
1219 phba->fc_prli_cnt++;
1221 case NLP_UNMAPPED_LIST:
1222 rport_add = unmapped;
1223 /* ensure all vestiges of "mapped" significance are gone */
1224 nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1225 nlp->nlp_flag |= list;
1226 /* Put it at the end of the unmap list */
1227 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1228 phba->fc_unmap_cnt++;
1229 phba->nport_event_cnt++;
1230 /* stop nodev tmo if running */
1231 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1232 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1233 spin_unlock_irq(phba->host->host_lock);
1234 del_timer_sync(&nlp->nlp_tmofunc);
1235 spin_lock_irq(phba->host->host_lock);
1236 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1237 list_del_init(&nlp->nodev_timeout_evt.
1241 nlp->nlp_type |= NLP_FC_NODE;
1243 case NLP_MAPPED_LIST:
1245 nlp->nlp_flag |= list;
1246 /* Put it at the end of the map list */
1247 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1249 phba->nport_event_cnt++;
1250 /* stop nodev tmo if running */
1251 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1252 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1253 spin_unlock_irq(phba->host->host_lock);
1254 del_timer_sync(&nlp->nlp_tmofunc);
1255 spin_lock_irq(phba->host->host_lock);
1256 if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1257 list_del_init(&nlp->nodev_timeout_evt.
1263 nlp->nlp_flag |= list;
1264 /* Put it at the end of the npr list */
1265 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1268 if (!(nlp->nlp_flag & NLP_NODEV_TMO))
1269 mod_timer(&nlp->nlp_tmofunc,
1270 jiffies + HZ * phba->cfg_nodev_tmo);
1272 nlp->nlp_flag |= NLP_NODEV_TMO;
1273 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
1279 spin_unlock_irq(phba->host->host_lock);
1282 * We make all the calls into the transport after we have
1283 * moved the node between lists. This is so that we don't
1284 * release the lock while in-between lists.
1287 /* Don't upcall midlayer if we're unloading */
1288 if (!(phba->fc_flag & FC_UNLOADING)) {
1290 * We revalidate the rport pointer as the "add" function
1291 * may have removed the remote port.
1293 if ((rport_del != none) && nlp->rport)
1294 lpfc_unregister_remote_port(phba, nlp);
1296 if (rport_add != none) {
1298 * Tell the fc transport about the port, if we haven't
1299 * already. If we have, and it's a scsi entity, be
1300 * sure to unblock any attached scsi devices
1303 lpfc_register_remote_port(phba, nlp);
1306 * if we added to Mapped list, but the remote port
1307 * registration failed or assigned a target id outside
1308 * our presentable range - move the node to the
1311 if ((rport_add == mapped) &&
1313 (nlp->rport->scsi_target_id == -1) ||
1314 (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
1315 nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1316 spin_lock_irq(phba->host->host_lock);
1317 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1318 spin_unlock_irq(phba->host->host_lock);
1319 lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
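/*
 * Typical caller sequence (sketch): update nlp_state first, then let
 * lpfc_nlp_list() move the node and make any FC-transport upcalls,
 * e.g. as the fabric REG_LOGIN completion above does:
 *
 *	ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
 *	lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
 */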
1327 * Start / restart the rescue timer for Discovery / RSCN handling
1330 lpfc_set_disctmo(struct lpfc_hba * phba)
1334 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
1335 /* For FAN, timeout should be greater than edtov */
1336 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1338 /* Normal discovery timeout should be greater than the ELS/CT timeout
1339 * FC spec states we need 3 * ratov for CT requests
1341 tmo = ((phba->fc_ratov * 3) + 3);
1344 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
1345 spin_lock_irq(phba->host->host_lock);
1346 phba->fc_flag |= FC_DISC_TMO;
1347 spin_unlock_irq(phba->host->host_lock);
1349 /* Start Discovery Timer state <hba_state> */
1350 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1351 "%d:0247 Start Discovery Timer state x%x "
1352 "Data: x%x x%lx x%x x%x\n",
1354 phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
1355 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1361 * Cancel rescue timer for Discovery / RSCN handling
1364 lpfc_can_disctmo(struct lpfc_hba * phba)
1366 /* Turn off discovery timer if it's running */
1367 if (phba->fc_flag & FC_DISC_TMO) {
1368 spin_lock_irq(phba->host->host_lock);
1369 phba->fc_flag &= ~FC_DISC_TMO;
1370 spin_unlock_irq(phba->host->host_lock);
1371 del_timer_sync(&phba->fc_disctmo);
1372 phba->work_hba_events &= ~WORKER_DISC_TMO;
1375 /* Cancel Discovery Timer state <hba_state> */
1376 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1377 "%d:0248 Cancel Discovery Timer state x%x "
1378 "Data: x%x x%x x%x\n",
1379 phba->brd_no, phba->hba_state, phba->fc_flag,
1380 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1386 * Check the specified ring for an outstanding IOCB on the SLI queue.
1387 * Return true if the iocb matches the specified nport
1390 lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1391 struct lpfc_sli_ring * pring,
1392 struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
1394 struct lpfc_sli *psli;
1399 if (pring->ringno == LPFC_ELS_RING) {
1400 switch (icmd->ulpCommand) {
1401 case CMD_GEN_REQUEST64_CR:
1402 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1404 case CMD_ELS_REQUEST64_CR:
1405 case CMD_XMIT_ELS_RSP64_CX:
1406 if (iocb->context1 == (uint8_t *) ndlp)
1409 } else if (pring->ringno == psli->ip_ring) {
1411 } else if (pring->ringno == psli->fcp_ring) {
1412 /* Skip match check if waiting to relogin to FCP target */
1413 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1414 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1417 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1420 } else if (pring->ringno == psli->next_ring) {
1427 * Free resources / clean up outstanding I/Os
1428 * associated with nlp_rpi in the LPFC_NODELIST entry.
1431 lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1433 struct lpfc_sli *psli;
1434 struct lpfc_sli_ring *pring;
1435 struct lpfc_iocbq *iocb, *next_iocb;
1440 * Everything that matches on txcmplq will be returned
1441 * by firmware with a no rpi error.
1444 rpi = ndlp->nlp_rpi;
1446 /* Now process each ring */
1447 for (i = 0; i < psli->num_rings; i++) {
1448 pring = &psli->ring[i];
1450 spin_lock_irq(phba->host->host_lock);
1451 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1454 * Check to see if iocb matches the nport we are
1457 if ((lpfc_check_sli_ndlp
1458 (phba, pring, iocb, ndlp))) {
1459 /* It matches, so dequeue and call compl
1461 list_del(&iocb->list);
1463 if (iocb->iocb_cmpl) {
1466 IOSTAT_LOCAL_REJECT;
1467 icmd->un.ulpWord[4] =
1469 spin_unlock_irq(phba->host->
1471 (iocb->iocb_cmpl) (phba,
1473 spin_lock_irq(phba->host->
1476 lpfc_sli_release_iocbq(phba,
1480 spin_unlock_irq(phba->host->host_lock);
1488 * Free rpi associated with LPFC_NODELIST entry.
1489 * This routine is called from lpfc_freenode(), when we are removing
1490 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1491 * LOGO that completes successfully, and we are waiting to PLOGI back
1492 * to the remote NPort. In addition, it is called after we receive
1493 * an unsolicited ELS cmd, send back a rsp, the rsp completes, and
1494 * we are waiting to PLOGI back to the remote NPort.
1497 lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1502 if (ndlp->nlp_rpi) {
1503 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1504 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
1505 mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
1506 rc = lpfc_sli_issue_mbox
1507 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1508 if (rc == MBX_NOT_FINISHED)
1509 mempool_free( mbox, phba->mbox_mem_pool);
1511 lpfc_no_rpi(phba, ndlp);
1519 * Free resources associated with LPFC_NODELIST entry
1520 * so it can be freed.
1523 lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1526 LPFC_MBOXQ_t *nextmb;
1527 struct lpfc_dmabuf *mp;
1529 /* Cleanup node for NPort <nlp_DID> */
1530 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1531 "%d:0900 Cleanup node for NPort x%x "
1532 "Data: x%x x%x x%x\n",
1533 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1534 ndlp->nlp_state, ndlp->nlp_rpi);
1536 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
1539 * if unloading the driver - just leave the remote port in place.
1540 * The driver unload will force the attached devices to detach
1541 * and flush cache's w/o generating flush errors.
1543 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
1544 lpfc_unregister_remote_port(phba, ndlp);
1545 ndlp->nlp_sid = NLP_NO_SID;
1548 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1549 if ((mb = phba->sli.mbox_active)) {
1550 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1551 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1552 mb->context2 = NULL;
1553 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1556 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1557 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1558 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1559 mp = (struct lpfc_dmabuf *) (mb->context1);
1561 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1564 list_del(&mb->list);
1565 mempool_free(mb, phba->mbox_mem_pool);
1569 lpfc_els_abort(phba,ndlp,0);
1570 spin_lock_irq(phba->host->host_lock);
1571 ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
1572 spin_unlock_irq(phba->host->host_lock);
1573 del_timer_sync(&ndlp->nlp_tmofunc);
1575 ndlp->nlp_last_elscmd = 0;
1576 del_timer_sync(&ndlp->nlp_delayfunc);
1578 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1579 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1580 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1581 list_del_init(&ndlp->els_retry_evt.evt_listp);
1583 lpfc_unreg_rpi(phba, ndlp);
1589 * Check to see if we can free the nlp back to the freelist.
1590 * If we are in the middle of using the nlp in the discovery state
1591 * machine, defer the free till we reach the end of the state machine.
1594 lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1596 if (ndlp->nlp_flag & NLP_NODEV_TMO) {
1597 spin_lock_irq(phba->host->host_lock);
1598 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1599 spin_unlock_irq(phba->host->host_lock);
1600 del_timer_sync(&ndlp->nlp_tmofunc);
1601 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1602 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1607 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1608 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1611 if (ndlp->nlp_disc_refcnt) {
1612 spin_lock_irq(phba->host->host_lock);
1613 ndlp->nlp_flag |= NLP_DELAY_REMOVE;
1614 spin_unlock_irq(phba->host->host_lock);
1616 lpfc_freenode(phba, ndlp);
1617 mempool_free( ndlp, phba->nlp_mem_pool);
1623 lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1629 if (did == Bcast_DID)
1632 if (ndlp->nlp_DID == 0) {
1636 /* First check for Direct match */
1637 if (ndlp->nlp_DID == did)
1640 /* Next check for an area/domain == 0 match */
1641 mydid.un.word = phba->fc_myDID;
1642 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
1646 matchdid.un.word = did;
1647 ndlpdid.un.word = ndlp->nlp_DID;
1648 if (matchdid.un.b.id == ndlpdid.un.b.id) {
1649 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
1650 (mydid.un.b.area == matchdid.un.b.area)) {
1651 if ((ndlpdid.un.b.domain == 0) &&
1652 (ndlpdid.un.b.area == 0)) {
1653 if (ndlpdid.un.b.id)
1659 matchdid.un.word = ndlp->nlp_DID;
1660 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
1661 (mydid.un.b.area == ndlpdid.un.b.area)) {
1662 if ((matchdid.un.b.domain == 0) &&
1663 (matchdid.un.b.area == 0)) {
1664 if (matchdid.un.b.id)
1672 /* Search for a nodelist entry on a specific list */
1673 struct lpfc_nodelist *
1674 lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1676 struct lpfc_nodelist *ndlp, *next_ndlp;
1679 spin_lock_irq(phba->host->host_lock);
1680 if (order & NLP_SEARCH_UNMAPPED) {
1681 list_for_each_entry_safe(ndlp, next_ndlp,
1682 &phba->fc_nlpunmap_list, nlp_listp) {
1683 if (lpfc_matchdid(phba, ndlp, did)) {
1684 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1685 ((uint32_t) ndlp->nlp_xri << 16) |
1686 ((uint32_t) ndlp->nlp_type << 8) |
1687 ((uint32_t) ndlp->nlp_rpi & 0xff));
1688 /* FIND node DID unmapped */
1689 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1690 "%d:0929 FIND node DID unmapped"
1691 " Data: x%p x%x x%x x%x\n",
1693 ndlp, ndlp->nlp_DID,
1694 ndlp->nlp_flag, data1);
1695 spin_unlock_irq(phba->host->host_lock);
1701 if (order & NLP_SEARCH_MAPPED) {
1702 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1704 if (lpfc_matchdid(phba, ndlp, did)) {
1706 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1707 ((uint32_t) ndlp->nlp_xri << 16) |
1708 ((uint32_t) ndlp->nlp_type << 8) |
1709 ((uint32_t) ndlp->nlp_rpi & 0xff));
1710 /* FIND node DID mapped */
1711 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1712 "%d:0930 FIND node DID mapped "
1713 "Data: x%p x%x x%x x%x\n",
1715 ndlp, ndlp->nlp_DID,
1716 ndlp->nlp_flag, data1);
1717 spin_unlock_irq(phba->host->host_lock);
1723 if (order & NLP_SEARCH_PLOGI) {
1724 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1726 if (lpfc_matchdid(phba, ndlp, did)) {
1728 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1729 ((uint32_t) ndlp->nlp_xri << 16) |
1730 ((uint32_t) ndlp->nlp_type << 8) |
1731 ((uint32_t) ndlp->nlp_rpi & 0xff));
1732 /* LOG change to PLOGI */
1733 /* FIND node DID plogi */
1734 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1735 "%d:0908 FIND node DID plogi "
1736 "Data: x%p x%x x%x x%x\n",
1738 ndlp, ndlp->nlp_DID,
1739 ndlp->nlp_flag, data1);
1740 spin_unlock_irq(phba->host->host_lock);
1746 if (order & NLP_SEARCH_ADISC) {
1747 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1749 if (lpfc_matchdid(phba, ndlp, did)) {
1751 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1752 ((uint32_t) ndlp->nlp_xri << 16) |
1753 ((uint32_t) ndlp->nlp_type << 8) |
1754 ((uint32_t) ndlp->nlp_rpi & 0xff));
1755 /* LOG change to ADISC */
1756 /* FIND node DID adisc */
1757 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1758 "%d:0931 FIND node DID adisc "
1759 "Data: x%p x%x x%x x%x\n",
1761 ndlp, ndlp->nlp_DID,
1762 ndlp->nlp_flag, data1);
1763 spin_unlock_irq(phba->host->host_lock);
1769 if (order & NLP_SEARCH_REGLOGIN) {
1770 list_for_each_entry_safe(ndlp, next_ndlp,
1771 &phba->fc_reglogin_list, nlp_listp) {
1772 if (lpfc_matchdid(phba, ndlp, did)) {
1774 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1775 ((uint32_t) ndlp->nlp_xri << 16) |
1776 ((uint32_t) ndlp->nlp_type << 8) |
1777 ((uint32_t) ndlp->nlp_rpi & 0xff));
1778 /* LOG change to REGLOGIN */
1779 /* FIND node DID reglogin */
1780 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1781 "%d:0931 FIND node DID reglogin"
1782 " Data: x%p x%x x%x x%x\n",
1784 ndlp, ndlp->nlp_DID,
1785 ndlp->nlp_flag, data1);
1786 spin_unlock_irq(phba->host->host_lock);
1792 if (order & NLP_SEARCH_PRLI) {
1793 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1795 if (lpfc_matchdid(phba, ndlp, did)) {
1797 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1798 ((uint32_t) ndlp->nlp_xri << 16) |
1799 ((uint32_t) ndlp->nlp_type << 8) |
1800 ((uint32_t) ndlp->nlp_rpi & 0xff));
1801 /* LOG change to PRLI */
1802 /* FIND node DID prli */
1803 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1804 "%d:0931 FIND node DID prli "
1805 "Data: x%p x%x x%x x%x\n",
1807 ndlp, ndlp->nlp_DID,
1808 ndlp->nlp_flag, data1);
1809 spin_unlock_irq(phba->host->host_lock);
1815 if (order & NLP_SEARCH_NPR) {
1816 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1818 if (lpfc_matchdid(phba, ndlp, did)) {
1820 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1821 ((uint32_t) ndlp->nlp_xri << 16) |
1822 ((uint32_t) ndlp->nlp_type << 8) |
1823 ((uint32_t) ndlp->nlp_rpi & 0xff));
1824 /* LOG change to NPR */
1825 /* FIND node DID npr */
1826 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1827 "%d:0931 FIND node DID npr "
1828 "Data: x%p x%x x%x x%x\n",
1830 ndlp, ndlp->nlp_DID,
1831 ndlp->nlp_flag, data1);
1832 spin_unlock_irq(phba->host->host_lock);
1838 if (order & NLP_SEARCH_UNUSED) {
1839 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1841 if (lpfc_matchdid(phba, ndlp, did)) {
1843 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1844 ((uint32_t) ndlp->nlp_xri << 16) |
1845 ((uint32_t) ndlp->nlp_type << 8) |
1846 ((uint32_t) ndlp->nlp_rpi & 0xff));
1847 /* LOG change to UNUSED */
1848 /* FIND node DID unused */
1849 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1850 "%d:0931 FIND node DID unused "
1851 "Data: x%p x%x x%x x%x\n",
1853 ndlp, ndlp->nlp_DID,
1854 ndlp->nlp_flag, data1);
1855 spin_unlock_irq(phba->host->host_lock);
1861 spin_unlock_irq(phba->host->host_lock);
1863 /* FIND node did <did> NOT FOUND */
1864 lpfc_printf_log(phba,
1867 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1868 phba->brd_no, did, order);
1870 /* no match found */
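/*
 * Usage sketch: callers OR together NLP_SEARCH_* bits to select which
 * lists are walked, e.g. the fabric-login completion above does
 *
 *	ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
 *
 * while the NameServer-query timeout path narrows the search with
 * NLP_SEARCH_UNMAPPED only.
 */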
1874 struct lpfc_nodelist *
1875 lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1877 struct lpfc_nodelist *ndlp;
1880 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did);
1882 if ((phba->fc_flag & FC_RSCN_MODE) &&
1883 ((lpfc_rscn_payload_check(phba, did) == 0)))
1885 ndlp = (struct lpfc_nodelist *)
1886 mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1889 lpfc_nlp_init(phba, ndlp, did);
1890 ndlp->nlp_state = NLP_STE_NPR_NODE;
1891 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1892 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1895 if (phba->fc_flag & FC_RSCN_MODE) {
1896 if (lpfc_rscn_payload_check(phba, did)) {
1897 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1899 /* Since this node is marked for discovery,
1900 * delay timeout is not needed.
1902 if (ndlp->nlp_flag & NLP_DELAY_TMO)
1903 lpfc_cancel_retry_delay_tmo(phba, ndlp);
1905 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1909 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1910 if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST))
1912 ndlp->nlp_state = NLP_STE_NPR_NODE;
1913 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1914 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1919 /* Build a list of nodes to discover based on the loopmap */
1921 lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1924 uint32_t alpa, index;
1926 if (phba->hba_state <= LPFC_LINK_DOWN) {
1929 if (phba->fc_topology != TOPOLOGY_LOOP) {
1933 /* Check whether a loop map is present */
1934 if (phba->alpa_map[0]) {
1935 for (j = 1; j <= phba->alpa_map[0]; j++) {
1936 alpa = phba->alpa_map[j];
1938 if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1941 lpfc_setup_disc_node(phba, alpa);
1944 /* No alpamap, so try all alpa's */
1945 for (j = 0; j < FC_MAXLOOP; j++) {
1946 /* If cfg_scan_down is set, start from highest
1947 * ALPA (0xef) to lowest (0x1).
1949 if (phba->cfg_scan_down)
1952 index = FC_MAXLOOP - j - 1;
1953 alpa = lpfcAlpaArray[index];
1954 if ((phba->fc_myDID & 0xff) == alpa) {
1958 lpfc_setup_disc_node(phba, alpa);
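/*
 * Indexing note: lpfcAlpaArray[] holds FC_MAXLOOP (126) valid AL_PAs
 * ordered 0xEF down to 0x01, so the scan-down case walks the table
 * from index 0 (highest ALPA first, per the comment above), while
 * index = FC_MAXLOOP - j - 1 walks it from the tail, lowest ALPA
 * first.
 */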
1964 /* Start Link up / RSCN discovery on NPR list */
1966 lpfc_disc_start(struct lpfc_hba * phba)
1968 struct lpfc_sli *psli;
1970 struct lpfc_nodelist *ndlp, *next_ndlp;
1971 uint32_t did_changed, num_sent;
1972 uint32_t clear_la_pending;
1977 if (phba->hba_state <= LPFC_LINK_DOWN) {
1980 if (phba->hba_state == LPFC_CLEAR_LA)
1981 clear_la_pending = 1;
1983 clear_la_pending = 0;
1985 if (phba->hba_state < LPFC_HBA_READY) {
1986 phba->hba_state = LPFC_DISC_AUTH;
1988 lpfc_set_disctmo(phba);
1990 if (phba->fc_prevDID == phba->fc_myDID) {
1995 phba->fc_prevDID = phba->fc_myDID;
1996 phba->num_disc_nodes = 0;
1998 /* Start Discovery state <hba_state> */
1999 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2000 "%d:0202 Start Discovery hba state x%x "
2001 "Data: x%x x%x x%x\n",
2002 phba->brd_no, phba->hba_state, phba->fc_flag,
2003 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
2005 /* If our did changed, we MUST do PLOGI */
2006 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2008 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2010 spin_lock_irq(phba->host->host_lock);
2011 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2012 spin_unlock_irq(phba->host->host_lock);
2017 /* First do ADISCs - if any */
2018 num_sent = lpfc_els_disc_adisc(phba);
2023 if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
2024 /* If we get here, there is nothing to ADISC */
2025 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
2026 phba->hba_state = LPFC_CLEAR_LA;
2027 lpfc_clear_la(phba, mbox);
2028 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2029 rc = lpfc_sli_issue_mbox(phba, mbox,
2030 (MBX_NOWAIT | MBX_STOP_IOCB));
2031 if (rc == MBX_NOT_FINISHED) {
2032 mempool_free( mbox, phba->mbox_mem_pool);
2033 lpfc_disc_flush_list(phba);
2034 psli->ring[(psli->ip_ring)].flag &=
2035 ~LPFC_STOP_IOCB_EVENT;
2036 psli->ring[(psli->fcp_ring)].flag &=
2037 ~LPFC_STOP_IOCB_EVENT;
2038 psli->ring[(psli->next_ring)].flag &=
2039 ~LPFC_STOP_IOCB_EVENT;
2040 phba->hba_state = LPFC_HBA_READY;
2044 /* Next do PLOGIs - if any */
2045 num_sent = lpfc_els_disc_plogi(phba);
2050 if (phba->fc_flag & FC_RSCN_MODE) {
2051 /* Check to see if more RSCNs came in while we
2052 * were processing this one.
2054 if ((phba->fc_rscn_id_cnt == 0) &&
2055 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
2056 spin_lock_irq(phba->host->host_lock);
2057 phba->fc_flag &= ~FC_RSCN_MODE;
2058 spin_unlock_irq(phba->host->host_lock);
2060 lpfc_els_handle_rscn(phba);
2067 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
2068 * ring that match the specified nodelist.
2071 lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
2073 struct lpfc_sli *psli;
2075 struct lpfc_iocbq *iocb, *next_iocb;
2076 struct lpfc_sli_ring *pring;
2077 struct lpfc_dmabuf *mp;
2080 pring = &psli->ring[LPFC_ELS_RING];
2082 /* Error matching iocb on txq or txcmplq
2083 * First check the txq.
2085 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2086 if (iocb->context1 != ndlp) {
2090 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2091 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2093 list_del(&iocb->list);
2095 lpfc_els_free_iocb(phba, iocb);
2099 /* Next check the txcmplq */
2100 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2101 if (iocb->context1 != ndlp) {
2105 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2106 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2108 iocb->iocb_cmpl = NULL;
2109 /* context2 = cmd, context2->next = rsp, context3 =
2111 if (iocb->context2) {
2112 /* Free the response IOCB before handling the
2115 mp = (struct lpfc_dmabuf *) (iocb->context2);
2116 mp = list_get_first(&mp->list,
2120 /* Delay before releasing rsp buffer to
2121 * give UNREG mbox a chance to take
2125 &phba->freebufList);
2127 lpfc_mbuf_free(phba,
2128 ((struct lpfc_dmabuf *)
2129 iocb->context2)->virt,
2130 ((struct lpfc_dmabuf *)
2131 iocb->context2)->phys);
2132 kfree(iocb->context2);
2135 if (iocb->context3) {
2136 lpfc_mbuf_free(phba,
2137 ((struct lpfc_dmabuf *)
2138 iocb->context3)->virt,
2139 ((struct lpfc_dmabuf *)
2140 iocb->context3)->phys);
2141 kfree(iocb->context3);
2150 lpfc_disc_flush_list(struct lpfc_hba * phba)
2152 struct lpfc_nodelist *ndlp, *next_ndlp;
2154 if (phba->fc_plogi_cnt) {
2155 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
2157 lpfc_free_tx(phba, ndlp);
2158 lpfc_nlp_remove(phba, ndlp);
2161 if (phba->fc_adisc_cnt) {
2162 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2164 lpfc_free_tx(phba, ndlp);
2165 lpfc_nlp_remove(phba, ndlp);
2171 /*****************************************************************************/
2173 * NAME: lpfc_disc_timeout
2175 * FUNCTION: Fibre Channel driver discovery timeout routine.
2177 * EXECUTION ENVIRONMENT: interrupt only
2185 /*****************************************************************************/
2187 lpfc_disc_timeout(unsigned long ptr)
2189 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2190 unsigned long flags = 0;
2192 if (unlikely(!phba))
2195 spin_lock_irqsave(phba->host->host_lock, flags);
2196 if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
2197 phba->work_hba_events |= WORKER_DISC_TMO;
2198 if (phba->work_wait)
2199 wake_up(phba->work_wait);
2201 spin_unlock_irqrestore(phba->host->host_lock, flags);
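/*
 * lpfc_disc_timeout() runs in timer (softirq) context, so it only
 * latches WORKER_DISC_TMO and wakes the worker; the sleepable work is
 * done later by lpfc_disc_timeout_handler() via lpfc_work_done().
 * The same defer-to-worker shape, as a sketch (WORKER_XXX_TMO stands
 * in for any of the WORKER_*_TMO flags handled in lpfc_work_done()):
 *
 *	spin_lock_irqsave(phba->host->host_lock, flags);
 *	phba->work_hba_events |= WORKER_XXX_TMO;
 *	if (phba->work_wait)
 *		wake_up(phba->work_wait);
 *	spin_unlock_irqrestore(phba->host->host_lock, flags);
 */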
2206 lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2208 struct lpfc_sli *psli;
2209 struct lpfc_nodelist *ndlp, *next_ndlp;
2210 LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
2211 int rc, clrlaerr = 0;
2213 if (unlikely(!phba))
2216 if (!(phba->fc_flag & FC_DISC_TMO))
2221 spin_lock_irq(phba->host->host_lock);
2222 phba->fc_flag &= ~FC_DISC_TMO;
2223 spin_unlock_irq(phba->host->host_lock);
2225 switch (phba->hba_state) {
2227 case LPFC_LOCAL_CFG_LINK:
2228 /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
2230 lpfc_printf_log(phba,
2233 "%d:0221 FAN timeout\n",
2236 /* Start discovery by sending FLOGI, clean up old rpis */
2237 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2239 if (ndlp->nlp_type & NLP_FABRIC) {
2240 /* Clean up the ndlp on Fabric connections */
2241 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
2242 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2243 /* Fail outstanding IO now since device
2244 * is marked for PLOGI.
2246 lpfc_unreg_rpi(phba, ndlp);
2249 phba->hba_state = LPFC_FLOGI;
2250 lpfc_set_disctmo(phba);
2251 lpfc_initial_flogi(phba);
2255 /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2256 /* Initial FLOGI timeout */
2257 lpfc_printf_log(phba,
2260 "%d:0222 Initial FLOGI timeout\n",
2263 /* Assume no Fabric and go on with discovery.
2264 * Check for outstanding ELS FLOGI to abort.
2267 /* FLOGI failed, so just use loop map to make discovery list */
2268 lpfc_disc_list_loopmap(phba);
2270 /* Start discovery */
2271 lpfc_disc_start(phba);
2274 case LPFC_FABRIC_CFG_LINK:
2275 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2277 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2278 "%d:0223 Timeout while waiting for NameServer "
2279 "login\n", phba->brd_no);
2281 /* Next look for NameServer ndlp */
2282 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
2284 lpfc_nlp_remove(phba, ndlp);
2285 /* Start discovery */
2286 lpfc_disc_start(phba);
2290 /* Check for wait for NameServer Rsp timeout */
2291 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2292 "%d:0224 NameServer Query timeout "
2295 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2297 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2300 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2301 /* Try it one more time */
2302 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
2306 phba->fc_ns_retry = 0;
2309 /* Nothing to authenticate, so CLEAR_LA right now */
2310 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2313 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2314 "%d:0226 Device Discovery "
2315 "completion error\n",
2317 phba->hba_state = LPFC_HBA_ERROR;
2321 phba->hba_state = LPFC_CLEAR_LA;
2322 lpfc_clear_la(phba, clearlambox);
2323 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2324 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2325 (MBX_NOWAIT | MBX_STOP_IOCB));
2326 if (rc == MBX_NOT_FINISHED) {
2327 mempool_free(clearlambox, phba->mbox_mem_pool);
2332 /* Setup and issue mailbox INITIALIZE LINK command */
2333 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2334 if (!initlinkmbox) {
2335 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2336 "%d:0226 Device Discovery "
2337 "completion error\n",
2339 phba->hba_state = LPFC_HBA_ERROR;
2343 lpfc_linkdown(phba);
2344 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2345 phba->cfg_link_speed);
2346 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2347 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2348 (MBX_NOWAIT | MBX_STOP_IOCB));
2349 if (rc == MBX_NOT_FINISHED)
2350 mempool_free(initlinkmbox, phba->mbox_mem_pool);
2354 case LPFC_DISC_AUTH:
2355 /* Node Authentication timeout */
2356 lpfc_printf_log(phba,
2359 "%d:0227 Node Authentication timeout\n",
2361 lpfc_disc_flush_list(phba);
2362 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2365 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2366 "%d:0226 Device Discovery "
2367 "completion error\n",
2369 phba->hba_state = LPFC_HBA_ERROR;
2372 phba->hba_state = LPFC_CLEAR_LA;
2373 lpfc_clear_la(phba, clearlambox);
2374 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2375 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2376 (MBX_NOWAIT | MBX_STOP_IOCB));
2377 if (rc == MBX_NOT_FINISHED) {
2378 mempool_free(clearlambox, phba->mbox_mem_pool);
2384 /* CLEAR LA timeout */
2385 lpfc_printf_log(phba,
2388 "%d:0228 CLEAR LA timeout\n",
2393 case LPFC_HBA_READY:
2394 if (phba->fc_flag & FC_RSCN_MODE) {
2395 lpfc_printf_log(phba,
2398 "%d:0231 RSCN timeout Data: x%x x%x\n",
2400 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2402 /* Cleanup any outstanding ELS commands */
2403 lpfc_els_flush_cmd(phba);
2405 lpfc_els_flush_rscn(phba);
2406 lpfc_disc_flush_list(phba);
2412 lpfc_disc_flush_list(phba);
2413 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2414 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2415 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2416 phba->hba_state = LPFC_HBA_READY;
2423 lpfc_nodev_timeout(unsigned long ptr)
2425 struct lpfc_hba *phba;
2426 struct lpfc_nodelist *ndlp;
2427 unsigned long iflag;
2428 struct lpfc_work_evt *evtp;
2430 ndlp = (struct lpfc_nodelist *)ptr;
2431 phba = ndlp->nlp_phba;
2432 evtp = &ndlp->nodev_timeout_evt;
2433 spin_lock_irqsave(phba->host->host_lock, iflag);
2435 if (!list_empty(&evtp->evt_listp)) {
2436 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2439 evtp->evt_arg1 = ndlp;
2440 evtp->evt = LPFC_EVT_NODEV_TMO;
2441 list_add_tail(&evtp->evt_listp, &phba->work_list);
2442 if (phba->work_wait)
2443 wake_up(phba->work_wait);
2445 spin_unlock_irqrestore(phba->host->host_lock, iflag);
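/*
 * Unlike lpfc_workq_post_event(), which kmallocs a work_evt for rare
 * events, the nodev timeout reuses the lpfc_work_evt embedded in the
 * nodelist entry (ndlp->nodev_timeout_evt); the list_empty() check
 * above keeps the same event from being queued twice.
 */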
2451 * This routine handles processing an FDMI REG_LOGIN mailbox
2452 * command upon completion. It is setup in the LPFC_MBOXQ
2453 * as the completion routine when the command is
2454 * handed off to the SLI layer.
2457 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2459 struct lpfc_sli *psli;
2461 struct lpfc_dmabuf *mp;
2462 struct lpfc_nodelist *ndlp;
2467 ndlp = (struct lpfc_nodelist *) pmb->context2;
2468 mp = (struct lpfc_dmabuf *) (pmb->context1);
2470 pmb->context1 = NULL;
2472 ndlp->nlp_rpi = mb->un.varWords[0];
2473 ndlp->nlp_type |= NLP_FABRIC;
2474 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2475 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2477 /* Start issuing Fabric-Device Management Interface (FDMI)
2478 * command to 0xfffffa (the FDMI well-known port)
2480 if (phba->cfg_fdmi_on == 1) {
2481 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
2484 * Delay issuing FDMI command if fdmi-on=2
2485 * (supporting RPA/hostname)
2487 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2490 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2492 mempool_free( pmb, phba->mbox_mem_pool);
2498 * This routine looks up the ndlp lists
2499 * for the given RPI. If the rpi is found,
2500 * it returns the node list pointer
2503 struct lpfc_nodelist *
2504 lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
2506 struct lpfc_nodelist *ndlp;
2507 struct list_head * lists[]={&phba->fc_nlpunmap_list,
2508 &phba->fc_nlpmap_list,
2509 &phba->fc_plogi_list,
2510 &phba->fc_adisc_list,
2511 &phba->fc_reglogin_list};
2514 spin_lock_irq(phba->host->host_lock);
2515 for (i = 0; i < ARRAY_SIZE(lists); i++ )
2516 list_for_each_entry(ndlp, lists[i], nlp_listp)
2517 if (ndlp->nlp_rpi == rpi) {
2518 spin_unlock_irq(phba->host->host_lock);
2521 spin_unlock_irq(phba->host->host_lock);
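/*
 * Usage sketch (hypothetical caller): completion paths can map an
 * IOCB back to its node by RPI, since ELS/FCP commands carry the rpi
 * in ulpContext:
 *
 *	struct lpfc_nodelist *ndlp;
 *
 *	ndlp = lpfc_findnode_rpi(phba, icmd->ulpContext);
 *
 * NULL is returned when none of the searched lists holds the rpi.
 */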
2526 * This routine looks up the ndlp lists
2527 * for the given WWPN. If the WWPN is found,
2528 * it returns the node list pointer
2531 struct lpfc_nodelist *
2532 lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
2533 struct lpfc_name * wwpn)
2535 struct lpfc_nodelist *ndlp;
2536 struct list_head * lists[]={&phba->fc_nlpunmap_list,
2537 &phba->fc_nlpmap_list,
2539 &phba->fc_plogi_list,
2540 &phba->fc_adisc_list,
2541 &phba->fc_reglogin_list,
2542 &phba->fc_prli_list};
2543 uint32_t search[]={NLP_SEARCH_UNMAPPED,
2548 NLP_SEARCH_REGLOGIN,
2552 spin_lock_irq(phba->host->host_lock);
2553 for (i = 0; i < ARRAY_SIZE(lists); i++ ) {
2554 if (!(order & search[i]))
2556 list_for_each_entry(ndlp, lists[i], nlp_listp) {
2557 if (memcmp(&ndlp->nlp_portname, wwpn,
2558 sizeof(struct lpfc_name)) == 0) {
2559 spin_unlock_irq(phba->host->host_lock);
2564 spin_unlock_irq(phba->host->host_lock);
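/*
 * Usage sketch: the order mask selects lists just as in
 * lpfc_findnode_did(), e.g. to search only active login states:
 *
 *	ndlp = lpfc_findnode_wwpn(phba,
 *			NLP_SEARCH_MAPPED | NLP_SEARCH_UNMAPPED, &wwpn);
 *
 * where wwpn is the struct lpfc_name to match against nlp_portname.
 */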
2569 lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2572 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2573 INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2574 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2575 init_timer(&ndlp->nlp_tmofunc);
2576 ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2577 ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2578 init_timer(&ndlp->nlp_delayfunc);
2579 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2580 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2581 ndlp->nlp_DID = did;
2582 ndlp->nlp_phba = phba;
2583 ndlp->nlp_sid = NLP_NO_SID;
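/*
 * Allocation pattern (sketch, as used by lpfc_setup_disc_node()
 * above): the caller owns the mempool allocation and the initial
 * state/list placement; lpfc_nlp_init() only zeroes the entry and
 * wires up its timers and event list heads.
 *
 *	ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 *	if (ndlp) {
 *		lpfc_nlp_init(phba, ndlp, did);
 *		ndlp->nlp_state = NLP_STE_NPR_NODE;
 *		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
 *	}
 */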