1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/kthread.h>
25 #include <linux/interrupt.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_disc.h"
35 #include "lpfc_scsi.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_vport.h"
40 #include "lpfc_debugfs.h"
42 /* AlpaArray for assignment of scsi ids for scan-down and bind_method */
43 static uint8_t lpfcAlpaArray[] = {
44 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
45 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
46 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
47 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
48 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
49 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
50 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
51 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
52 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
53 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
54 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
55 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
56 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
59 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
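/*
 * lpfc_terminate_rport_io: called by the FC transport to terminate I/O
 * still outstanding on a remote port.  Aborts the FCP IOCBs queued for
 * the target and unblocks the SCSI target.
 */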
62 lpfc_terminate_rport_io(struct fc_rport *rport)
64 struct lpfc_rport_data *rdata;
65 struct lpfc_nodelist * ndlp;
66 struct lpfc_hba *phba;
68 rdata = rport->dd_data;
72 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
73 printk(KERN_ERR "Cannot find remote node"
74 " to terminate I/O Data x%x\n",
79 phba = ndlp->vport->phba;
81 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
82 "rport terminate: sid:x%x did:x%x flg:x%x",
83 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
85 if (ndlp->nlp_sid != NLP_NO_SID) {
86 lpfc_sli_abort_iocb(ndlp->vport,
87 &phba->sli.ring[phba->sli.fcp_ring],
88 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
92 * A device is normally blocked for rediscovery and unblocked when
93 * devloss timeout happens. If a vport is removed or the driver is
94 * unloaded before the devloss timeout happens, we need to unblock here.
96 scsi_target_unblock(&rport->dev);
101 * This function will be called when dev_loss_tmo fires.
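 * It posts an LPFC_EVT_DEV_LOSS event to the worker thread and waits on a
 * completion until the handler has run.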
104 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
106 struct lpfc_rport_data *rdata;
107 struct lpfc_nodelist * ndlp;
108 struct lpfc_vport *vport;
109 struct lpfc_hba *phba;
110 struct completion devloss_compl;
111 struct lpfc_work_evt *evtp;
113 rdata = rport->dd_data;
117 if (rport->scsi_target_id != -1) {
118 printk(KERN_ERR "Cannot find remote node"
119 " for rport in dev_loss_tmo_callbk x%x\n",
128 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
129 "rport devlosscb: sid:x%x did:x%x flg:x%x",
130 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
132 init_completion(&devloss_compl);
133 evtp = &ndlp->dev_loss_evt;
135 if (!list_empty(&evtp->evt_listp))
138 spin_lock_irq(&phba->hbalock);
139 evtp->evt_arg1 = ndlp;
140 evtp->evt_arg2 = &devloss_compl;
141 evtp->evt = LPFC_EVT_DEV_LOSS;
142 list_add_tail(&evtp->evt_listp, &phba->work_list);
144 wake_up(phba->work_wait);
146 spin_unlock_irq(&phba->hbalock);
148 wait_for_completion(&devloss_compl);
154 * This function is called from the worker thread when dev_loss_tmo fires.
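 * It drops the rport references for fabric nodes, and for FCP targets it
 * aborts outstanding I/O, logs the devloss timeout and, unless the node is
 * being rediscovered, removes it via the discovery state machine.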
158 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
160 struct lpfc_rport_data *rdata;
161 struct fc_rport *rport;
162 struct lpfc_vport *vport;
163 struct lpfc_hba *phba;
172 rdata = rport->dd_data;
173 name = (uint8_t *) &ndlp->nlp_portname;
177 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
178 "rport devlosstmo:did:x%x type:x%x id:x%x",
179 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
181 if (!(vport->load_flag & FC_UNLOADING) &&
182 ndlp->nlp_state == NLP_STE_MAPPED_NODE)
185 if (ndlp->nlp_type & NLP_FABRIC) {
189 /* We will clean up these Nodes in linkup */
190 put_node = rdata->pnode != NULL;
191 put_rport = ndlp->rport != NULL;
197 put_device(&rport->dev);
201 if (ndlp->nlp_sid != NLP_NO_SID) {
203 /* flush the target */
204 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
205 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
207 if (vport->load_flag & FC_UNLOADING)
211 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
212 "0203 Devloss timeout on "
213 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
214 "NPort x%x Data: x%x x%x x%x\n",
215 *name, *(name+1), *(name+2), *(name+3),
216 *(name+4), *(name+5), *(name+6), *(name+7),
217 ndlp->nlp_DID, ndlp->nlp_flag,
218 ndlp->nlp_state, ndlp->nlp_rpi);
220 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
221 "0204 Devloss timeout on "
222 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
223 "NPort x%x Data: x%x x%x x%x\n",
224 *name, *(name+1), *(name+2), *(name+3),
225 *(name+4), *(name+5), *(name+6), *(name+7),
226 ndlp->nlp_DID, ndlp->nlp_flag,
227 ndlp->nlp_state, ndlp->nlp_rpi);
230 if (!(vport->load_flag & FC_UNLOADING) &&
231 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
232 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
233 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
234 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
239 put_node = rdata->pnode != NULL;
240 put_rport = ndlp->rport != NULL;
246 put_device(&rport->dev);
252 lpfc_worker_wake_up(struct lpfc_hba *phba)
254 wake_up(phba->work_wait);
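/*
 * lpfc_work_list_done: drain phba->work_list and dispatch each queued
 * event (ELS retry, dev-loss, online/offline prep, offline, warm start,
 * kill board), completing any waiter attached to the event.
 */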
259 lpfc_work_list_done(struct lpfc_hba *phba)
261 struct lpfc_work_evt *evtp = NULL;
262 struct lpfc_nodelist *ndlp;
263 struct lpfc_vport *vport;
266 spin_lock_irq(&phba->hbalock);
267 while (!list_empty(&phba->work_list)) {
268 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
270 spin_unlock_irq(&phba->hbalock);
273 case LPFC_EVT_DEV_LOSS_DELAY:
274 free_evt = 0; /* evt is part of ndlp */
275 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
280 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
281 "rport devlossdly:did:x%x flg:x%x",
282 ndlp->nlp_DID, ndlp->nlp_flag, 0);
284 if (!(vport->load_flag & FC_UNLOADING) &&
285 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
286 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
287 lpfc_disc_state_machine(vport, ndlp, NULL,
291 case LPFC_EVT_ELS_RETRY:
292 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
293 lpfc_els_retry_delay_handler(ndlp);
294 free_evt = 0; /* evt is part of ndlp */
296 case LPFC_EVT_DEV_LOSS:
297 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
299 lpfc_dev_loss_tmo_handler(ndlp);
301 complete((struct completion *)(evtp->evt_arg2));
304 case LPFC_EVT_ONLINE:
305 if (phba->link_state < LPFC_LINK_DOWN)
306 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
308 *(int *) (evtp->evt_arg1) = 0;
309 complete((struct completion *)(evtp->evt_arg2));
311 case LPFC_EVT_OFFLINE_PREP:
312 if (phba->link_state >= LPFC_LINK_DOWN)
313 lpfc_offline_prep(phba);
314 *(int *)(evtp->evt_arg1) = 0;
315 complete((struct completion *)(evtp->evt_arg2));
317 case LPFC_EVT_OFFLINE:
319 lpfc_sli_brdrestart(phba);
320 *(int *)(evtp->evt_arg1) =
321 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
322 lpfc_unblock_mgmt_io(phba);
323 complete((struct completion *)(evtp->evt_arg2));
325 case LPFC_EVT_WARM_START:
327 lpfc_reset_barrier(phba);
328 lpfc_sli_brdreset(phba);
329 lpfc_hba_down_post(phba);
330 *(int *)(evtp->evt_arg1) =
331 lpfc_sli_brdready(phba, HS_MBRDY);
332 lpfc_unblock_mgmt_io(phba);
333 complete((struct completion *)(evtp->evt_arg2));
337 *(int *)(evtp->evt_arg1)
338 = (phba->pport->stopped)
339 ? 0 : lpfc_sli_brdkill(phba);
340 lpfc_unblock_mgmt_io(phba);
341 complete((struct completion *)(evtp->evt_arg2));
346 spin_lock_irq(&phba->hbalock);
348 spin_unlock_irq(&phba->hbalock);
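/*
 * lpfc_work_done: one pass of worker thread processing.  Handles the
 * latched host attention bits (error, mailbox, link), runs per-vport
 * timeout work, then services deferred slow (ELS) ring events and the
 * work list.
 */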
353 lpfc_work_done(struct lpfc_hba *phba)
355 struct lpfc_sli_ring *pring;
356 uint32_t ha_copy, status, control, work_port_events;
357 struct lpfc_vport **vports;
358 struct lpfc_vport *vport;
361 spin_lock_irq(&phba->hbalock);
362 ha_copy = phba->work_ha;
364 spin_unlock_irq(&phba->hbalock);
366 if (ha_copy & HA_ERATT)
367 lpfc_handle_eratt(phba);
369 if (ha_copy & HA_MBATT)
370 lpfc_sli_handle_mb_event(phba);
372 if (ha_copy & HA_LATT)
373 lpfc_handle_latt(phba);
374 vports = lpfc_create_vport_work_array(phba);
376 for(i = 0; i < LPFC_MAX_VPORTS; i++) {
378 * We may have no vports in the array if we are unloading, so if
379 * that happens just use the pport
381 if (vports[i] == NULL && i == 0)
387 work_port_events = vport->work_port_events;
388 if (work_port_events & WORKER_DISC_TMO)
389 lpfc_disc_timeout_handler(vport);
390 if (work_port_events & WORKER_ELS_TMO)
391 lpfc_els_timeout_handler(vport);
392 if (work_port_events & WORKER_HB_TMO)
393 lpfc_hb_timeout_handler(phba);
394 if (work_port_events & WORKER_MBOX_TMO)
395 lpfc_mbox_timeout_handler(phba);
396 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
397 lpfc_unblock_fabric_iocbs(phba);
398 if (work_port_events & WORKER_FDMI_TMO)
399 lpfc_fdmi_timeout_handler(vport);
400 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
401 lpfc_ramp_down_queue_handler(phba);
402 if (work_port_events & WORKER_RAMP_UP_QUEUE)
403 lpfc_ramp_up_queue_handler(phba);
404 spin_lock_irq(&vport->work_port_lock);
405 vport->work_port_events &= ~work_port_events;
406 spin_unlock_irq(&vport->work_port_lock);
408 lpfc_destroy_vport_work_array(vports);
410 pring = &phba->sli.ring[LPFC_ELS_RING];
411 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
412 status >>= (4*LPFC_ELS_RING);
413 if ((status & HA_RXMASK)
414 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
415 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
416 pring->flag |= LPFC_DEFERRED_RING_EVENT;
418 lpfc_sli_handle_slow_ring_event(phba, pring,
421 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
424 * Turn on Ring interrupts
426 spin_lock_irq(&phba->hbalock);
427 control = readl(phba->HCregaddr);
428 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
429 lpfc_debugfs_slow_ring_trc(phba,
430 "WRK Enable ring: cntl:x%x hacopy:x%x",
431 control, ha_copy, 0);
433 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
434 writel(control, phba->HCregaddr);
435 readl(phba->HCregaddr); /* flush */
438 lpfc_debugfs_slow_ring_trc(phba,
439 "WRK Ring ok: cntl:x%x hacopy:x%x",
440 control, ha_copy, 0);
442 spin_unlock_irq(&phba->hbalock);
444 lpfc_work_list_done(phba);
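/*
 * check_work_wait_done: wait_event condition for the worker thread.
 * Returns nonzero when a vport has pending events, host attention or
 * work-list entries are pending, a slow ring event was deferred, or the
 * thread should stop.
 */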
448 check_work_wait_done(struct lpfc_hba *phba)
450 struct lpfc_vport *vport;
451 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
454 spin_lock_irq(&phba->hbalock);
455 list_for_each_entry(vport, &phba->port_list, listentry) {
456 if (vport->work_port_events) {
461 if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
462 kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
466 phba->work_found = 0;
467 spin_unlock_irq(&phba->hbalock);
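/*
 * lpfc_do_work: main loop of the lpfc worker kthread.  Sleeps until
 * check_work_wait_done() reports pending work, then calls
 * lpfc_work_done(), yielding the CPU after too many consecutive
 * iterations.
 */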
473 lpfc_do_work(void *p)
475 struct lpfc_hba *phba = p;
477 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);
479 set_user_nice(current, -20);
480 phba->work_wait = &work_waitq;
481 phba->work_found = 0;
485 rc = wait_event_interruptible(work_waitq,
486 check_work_wait_done(phba));
490 if (kthread_should_stop())
493 lpfc_work_done(phba);
495 /* If there is a lot of slow ring work, like during link up
496 * check_work_wait_done() may cause this thread to not give
497 * up the CPU for very long periods of time. This may cause
498 * soft lockups or other problems. To avoid these situations
499 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
500 * consecutive iterations.
502 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
503 phba->work_found = 0;
507 phba->work_wait = NULL;
512 * This is only called to handle FC worker events. Since this is a rare
513 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
514 * embedding it in the IOCB.
517 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
520 struct lpfc_work_evt *evtp;
524 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
525 * be queued to worker thread for processing
527 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
531 evtp->evt_arg1 = arg1;
532 evtp->evt_arg2 = arg2;
535 spin_lock_irqsave(&phba->hbalock, flags);
536 list_add_tail(&evtp->evt_listp, &phba->work_list);
538 lpfc_worker_wake_up(phba);
539 spin_unlock_irqrestore(&phba->hbalock, flags);
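/*
 * lpfc_cleanup_rpis: walk the vport node list on link down and run each
 * node through DEVICE_RM or DEVICE_RECOVERY; when the firmware requires
 * a full vport teardown, also unregister the RPIs and the VPI.
 */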
545 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
547 struct lpfc_hba *phba = vport->phba;
548 struct lpfc_nodelist *ndlp, *next_ndlp;
551 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
552 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
555 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
556 lpfc_unreg_rpi(vport, ndlp);
558 /* Leave Fabric nodes alone on link down */
559 if (!remove && ndlp->nlp_type & NLP_FABRIC)
561 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
564 : NLP_EVT_DEVICE_RECOVERY);
566 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
567 lpfc_mbx_unreg_vpi(vport);
568 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
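/*
 * lpfc_linkdown_port: per-vport link-down processing.  Posts the
 * FCH_EVT_LINKDOWN event, flushes RSCN and ELS activity, puts nodes
 * into recovery, frees unused nodes and stops the discovery timer.
 */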
573 lpfc_linkdown_port(struct lpfc_vport *vport)
575 struct lpfc_nodelist *ndlp, *next_ndlp;
576 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
578 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
580 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
581 "Link Down: state:x%x rtry:x%x flg:x%x",
582 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
584 /* Cleanup any outstanding RSCN activity */
585 lpfc_els_flush_rscn(vport);
587 /* Cleanup any outstanding ELS commands */
588 lpfc_els_flush_cmd(vport);
590 lpfc_cleanup_rpis(vport, 0);
592 /* free any ndlp's on unused list */
593 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
594 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
595 lpfc_drop_node(vport, ndlp);
597 /* Turn off discovery timer if its running */
598 lpfc_can_disctmo(vport);
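/*
 * lpfc_linkdown: adapter level link-down processing.  Marks the link
 * down, runs lpfc_linkdown_port() for every vport, cleans up firmware
 * default RPIs and resets the DID when running point-to-point.
 */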
602 lpfc_linkdown(struct lpfc_hba *phba)
604 struct lpfc_vport *vport = phba->pport;
605 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
606 struct lpfc_vport **vports;
610 if (phba->link_state == LPFC_LINK_DOWN) {
613 spin_lock_irq(&phba->hbalock);
614 if (phba->link_state > LPFC_LINK_DOWN) {
615 phba->link_state = LPFC_LINK_DOWN;
616 phba->pport->fc_flag &= ~FC_LBIT;
618 spin_unlock_irq(&phba->hbalock);
619 vports = lpfc_create_vport_work_array(phba);
621 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
622 /* Issue a LINK DOWN event to all nodes */
623 lpfc_linkdown_port(vports[i]);
625 lpfc_destroy_vport_work_array(vports);
626 /* Clean up any firmware default rpi's */
627 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
629 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
631 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
632 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
633 == MBX_NOT_FINISHED) {
634 mempool_free(mb, phba->mbox_mem_pool);
638 /* Setup myDID for link up if we are in pt2pt mode */
639 if (phba->pport->fc_flag & FC_PT2PT) {
640 phba->pport->fc_myDID = 0;
641 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
643 lpfc_config_link(phba, mb);
644 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
646 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
647 == MBX_NOT_FINISHED) {
648 mempool_free(mb, phba->mbox_mem_pool);
651 spin_lock_irq(shost->host_lock);
652 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
653 spin_unlock_irq(shost->host_lock);
660 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
662 struct lpfc_nodelist *ndlp;
664 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
665 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
668 if (ndlp->nlp_type & NLP_FABRIC) {
669 /* On Linkup it's safe to clean up the ndlp
670 * from Fabric connections.
672 if (ndlp->nlp_DID != Fabric_DID)
673 lpfc_unreg_rpi(vport, ndlp);
674 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
675 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
676 /* Fail outstanding IO now since device is
679 lpfc_unreg_rpi(vport, ndlp);
685 lpfc_linkup_port(struct lpfc_vport *vport)
687 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
688 struct lpfc_nodelist *ndlp, *next_ndlp;
689 struct lpfc_hba *phba = vport->phba;
691 if ((vport->load_flag & FC_UNLOADING) != 0)
694 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
695 "Link Up: top:x%x speed:x%x flg:x%x",
696 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
698 /* If NPIV is not enabled, only bring the physical port up */
699 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
700 (vport != phba->pport))
703 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
705 spin_lock_irq(shost->host_lock);
706 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
707 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
708 vport->fc_flag |= FC_NDISC_ACTIVE;
709 vport->fc_ns_retry = 0;
710 spin_unlock_irq(shost->host_lock);
712 if (vport->fc_flag & FC_LBIT)
713 lpfc_linkup_cleanup_nodes(vport);
715 /* free any ndlp's in unused state */
716 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
718 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
719 lpfc_drop_node(vport, ndlp);
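/*
 * lpfc_linkup: adapter level link-up processing.  Marks the link up,
 * unblocks fabric IOCBs, runs lpfc_linkup_port() for every vport and,
 * when NPIV is enabled, issues CLEAR_LA on the physical port.
 */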
723 lpfc_linkup(struct lpfc_hba *phba)
725 struct lpfc_vport **vports;
728 phba->link_state = LPFC_LINK_UP;
730 /* Unblock fabric iocbs if they are blocked */
731 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
732 del_timer_sync(&phba->fabric_block_timer);
734 vports = lpfc_create_vport_work_array(phba);
736 for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
737 lpfc_linkup_port(vports[i]);
738 lpfc_destroy_vport_work_array(vports);
739 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
740 lpfc_issue_clear_la(phba, phba->pport);
746 * This routine handles processing a CLEAR_LA mailbox
747 * command upon completion. It is set up in the LPFC_MBOXQ
748 * as the completion routine when the command is
749 * handed off to the SLI layer.
752 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
754 struct lpfc_vport *vport = pmb->vport;
755 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
756 struct lpfc_sli *psli = &phba->sli;
757 MAILBOX_t *mb = &pmb->mb;
760 /* Since we don't do discovery right now, turn these off here */
761 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
762 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
763 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
765 /* Check for error */
766 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
767 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
768 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
769 "0320 CLEAR_LA mbxStatus error x%x hba "
771 mb->mbxStatus, vport->port_state);
772 phba->link_state = LPFC_HBA_ERROR;
776 if (vport->port_type == LPFC_PHYSICAL_PORT)
777 phba->link_state = LPFC_HBA_READY;
779 spin_lock_irq(&phba->hbalock);
780 psli->sli_flag |= LPFC_PROCESS_LA;
781 control = readl(phba->HCregaddr);
782 control |= HC_LAINT_ENA;
783 writel(control, phba->HCregaddr);
784 readl(phba->HCregaddr); /* flush */
785 spin_unlock_irq(&phba->hbalock);
788 vport->num_disc_nodes = 0;
789 /* go thru NPR nodes and issue ELS PLOGIs */
790 if (vport->fc_npr_cnt)
791 lpfc_els_disc_plogi(vport);
793 if (!vport->num_disc_nodes) {
794 spin_lock_irq(shost->host_lock);
795 vport->fc_flag &= ~FC_NDISC_ACTIVE;
796 spin_unlock_irq(shost->host_lock);
799 vport->port_state = LPFC_VPORT_READY;
802 /* Device Discovery completes */
803 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
804 "0225 Device Discovery completes\n");
805 mempool_free(pmb, phba->mbox_mem_pool);
807 spin_lock_irq(shost->host_lock);
808 vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
809 spin_unlock_irq(shost->host_lock);
811 del_timer_sync(&phba->fc_estabtmo);
813 lpfc_can_disctmo(vport);
815 /* turn on Link Attention interrupts */
817 spin_lock_irq(&phba->hbalock);
818 psli->sli_flag |= LPFC_PROCESS_LA;
819 control = readl(phba->HCregaddr);
820 control |= HC_LAINT_ENA;
821 writel(control, phba->HCregaddr);
822 readl(phba->HCregaddr); /* flush */
823 spin_unlock_irq(&phba->hbalock);
830 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
832 struct lpfc_vport *vport = pmb->vport;
834 if (pmb->mb.mbxStatus)
837 mempool_free(pmb, phba->mbox_mem_pool);
839 if (phba->fc_topology == TOPOLOGY_LOOP &&
840 vport->fc_flag & FC_PUBLIC_LOOP &&
841 !(vport->fc_flag & FC_LBIT)) {
842 /* Need to wait for FAN - use discovery timer
843 * for timeout. port_state is identically
844 * LPFC_LOCAL_CFG_LINK while waiting for FAN
846 lpfc_set_disctmo(vport);
850 /* Start discovery by sending a FLOGI. port_state is identically
851 * LPFC_FLOGI while waiting for FLOGI cmpl
853 if (vport->port_state != LPFC_FLOGI) {
854 vport->port_state = LPFC_FLOGI;
855 lpfc_set_disctmo(vport);
856 lpfc_initial_flogi(vport);
861 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
862 "0306 CONFIG_LINK mbxStatus error x%x "
864 pmb->mb.mbxStatus, vport->port_state);
865 mempool_free(pmb, phba->mbox_mem_pool);
869 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
870 "0200 CONFIG_LINK bad hba state x%x\n",
873 lpfc_issue_clear_la(phba, vport);
878 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
880 MAILBOX_t *mb = &pmb->mb;
881 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
882 struct lpfc_vport *vport = pmb->vport;
885 /* Check for error */
887 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
888 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
889 "0319 READ_SPARAM mbxStatus error x%x "
891 mb->mbxStatus, vport->port_state);
896 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
897 sizeof (struct serv_parm));
898 if (phba->cfg_soft_wwnn)
899 u64_to_wwn(phba->cfg_soft_wwnn,
900 vport->fc_sparam.nodeName.u.wwn);
901 if (phba->cfg_soft_wwpn)
902 u64_to_wwn(phba->cfg_soft_wwpn,
903 vport->fc_sparam.portName.u.wwn);
904 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
905 sizeof(vport->fc_nodename));
906 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
907 sizeof(vport->fc_portname));
908 if (vport->port_type == LPFC_PHYSICAL_PORT) {
909 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
910 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
913 lpfc_mbuf_free(phba, mp->virt, mp->phys);
915 mempool_free(pmb, phba->mbox_mem_pool);
919 pmb->context1 = NULL;
920 lpfc_mbuf_free(phba, mp->virt, mp->phys);
922 lpfc_issue_clear_la(phba, vport);
923 mempool_free(pmb, phba->mbox_mem_pool);
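/*
 * lpfc_mbx_process_link_up: act on a link-up READ_LA.  Records link
 * speed and topology, captures the ALPA map on loop topologies, then
 * issues READ_SPARAM and CONFIG_LINK mailboxes to continue bring-up.
 */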
928 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
930 struct lpfc_vport *vport = phba->pport;
931 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
933 struct lpfc_dmabuf *mp;
936 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
937 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
939 spin_lock_irq(&phba->hbalock);
940 switch (la->UlnkSpeed) {
942 phba->fc_linkspeed = LA_1GHZ_LINK;
945 phba->fc_linkspeed = LA_2GHZ_LINK;
948 phba->fc_linkspeed = LA_4GHZ_LINK;
951 phba->fc_linkspeed = LA_8GHZ_LINK;
954 phba->fc_linkspeed = LA_UNKNW_LINK;
958 phba->fc_topology = la->topology;
959 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
961 if (phba->fc_topology == TOPOLOGY_LOOP) {
962 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
964 /* Get Loop Map information */
966 vport->fc_flag |= FC_LBIT;
968 vport->fc_myDID = la->granted_AL_PA;
969 i = la->un.lilpBde64.tus.f.bdeSize;
972 phba->alpa_map[0] = 0;
974 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
985 numalpa = phba->alpa_map[0];
987 while (j < numalpa) {
988 memset(un.pamap, 0, 16);
989 for (k = 1; j < numalpa; k++) {
991 phba->alpa_map[j + 1];
996 /* Link Up Event ALPA map */
997 lpfc_printf_log(phba,
1000 "1304 Link Up Event "
1001 "ALPA map Data: x%x "
1003 un.pa.wd1, un.pa.wd2,
1004 un.pa.wd3, un.pa.wd4);
1009 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
1010 if (phba->max_vpi && phba->cfg_enable_npiv &&
1011 (phba->sli_rev == 3))
1012 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
1014 vport->fc_myDID = phba->fc_pref_DID;
1015 vport->fc_flag |= FC_LBIT;
1017 spin_unlock_irq(&phba->hbalock);
1021 lpfc_read_sparam(phba, sparam_mbox, 0);
1022 sparam_mbox->vport = vport;
1023 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1024 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
1025 if (rc == MBX_NOT_FINISHED) {
1026 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
1027 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1029 mempool_free(sparam_mbox, phba->mbox_mem_pool);
1031 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1037 vport->port_state = LPFC_LOCAL_CFG_LINK;
1038 lpfc_config_link(phba, cfglink_mbox);
1039 cfglink_mbox->vport = vport;
1040 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
1041 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
1042 if (rc != MBX_NOT_FINISHED)
1044 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
1047 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1048 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1049 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
1050 vport->port_state, sparam_mbox, cfglink_mbox);
1051 lpfc_issue_clear_la(phba, vport);
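/*
 * lpfc_mbx_issue_link_down: handle a link-down attention from READ_LA;
 * runs the link-down processing and re-enables link attention interrupts.
 */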
1056 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1059 struct lpfc_sli *psli = &phba->sli;
1061 lpfc_linkdown(phba);
1063 /* turn on Link Attention interrupts - no CLEAR_LA needed */
1064 spin_lock_irq(&phba->hbalock);
1065 psli->sli_flag |= LPFC_PROCESS_LA;
1066 control = readl(phba->HCregaddr);
1067 control |= HC_LAINT_ENA;
1068 writel(control, phba->HCregaddr);
1069 readl(phba->HCregaddr); /* flush */
1070 spin_unlock_irq(&phba->hbalock);
1074 * This routine handles processing a READ_LA mailbox
1075 * command upon completion. It is set up in the LPFC_MBOXQ
1076 * as the completion routine when the command is
1077 * handed off to the SLI layer.
1080 lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1082 struct lpfc_vport *vport = pmb->vport;
1083 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1085 MAILBOX_t *mb = &pmb->mb;
1086 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1088 /* Check for error */
1089 if (mb->mbxStatus) {
1090 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1091 "1307 READ_LA mbox error x%x state x%x\n",
1092 mb->mbxStatus, vport->port_state);
1093 lpfc_mbx_issue_link_down(phba);
1094 phba->link_state = LPFC_HBA_ERROR;
1095 goto lpfc_mbx_cmpl_read_la_free_mbuf;
1098 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
1100 memcpy(&phba->alpa_map[0], mp->virt, 128);
1102 spin_lock_irq(shost->host_lock);
1104 vport->fc_flag |= FC_BYPASSED_MODE;
1106 vport->fc_flag &= ~FC_BYPASSED_MODE;
1107 spin_unlock_irq(shost->host_lock);
1109 if (((phba->fc_eventTag + 1) < la->eventTag) ||
1110 (phba->fc_eventTag == la->eventTag)) {
1111 phba->fc_stat.LinkMultiEvent++;
1112 if (la->attType == AT_LINK_UP)
1113 if (phba->fc_eventTag != 0)
1114 lpfc_linkdown(phba);
1117 phba->fc_eventTag = la->eventTag;
1119 if (la->attType == AT_LINK_UP) {
1120 phba->fc_stat.LinkUp++;
1121 if (phba->link_flag & LS_LOOPBACK_MODE) {
1122 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1123 "1306 Link Up Event in loop back mode "
1124 "x%x received Data: x%x x%x x%x x%x\n",
1125 la->eventTag, phba->fc_eventTag,
1126 la->granted_AL_PA, la->UlnkSpeed,
1129 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1130 "1303 Link Up Event x%x received "
1131 "Data: x%x x%x x%x x%x\n",
1132 la->eventTag, phba->fc_eventTag,
1133 la->granted_AL_PA, la->UlnkSpeed,
1136 lpfc_mbx_process_link_up(phba, la);
1138 phba->fc_stat.LinkDown++;
1139 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1140 "1305 Link Down Event x%x received "
1141 "Data: x%x x%x x%x\n",
1142 la->eventTag, phba->fc_eventTag,
1143 phba->pport->port_state, vport->fc_flag);
1144 lpfc_mbx_issue_link_down(phba);
1147 lpfc_mbx_cmpl_read_la_free_mbuf:
1148 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1150 mempool_free(pmb, phba->mbox_mem_pool);
1155 * This routine handles processing a REG_LOGIN mailbox
1156 * command upon completion. It is set up in the LPFC_MBOXQ
1157 * as the completion routine when the command is
1158 * handed off to the SLI layer.
1161 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1163 struct lpfc_vport *vport = pmb->vport;
1164 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1165 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1167 pmb->context1 = NULL;
1169 /* Good status, call state machine */
1170 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
1171 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1173 mempool_free(pmb, phba->mbox_mem_pool);
1180 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1182 MAILBOX_t *mb = &pmb->mb;
1183 struct lpfc_vport *vport = pmb->vport;
1184 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1186 switch (mb->mbxStatus) {
1190 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1191 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
1195 vport->unreg_vpi_cmpl = VPORT_OK;
1196 mempool_free(pmb, phba->mbox_mem_pool);
1198 * This shost reference might have been taken at the beginning of
1199 * lpfc_vport_delete()
1201 if (vport->load_flag & FC_UNLOADING)
1202 scsi_host_put(shost);
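/*
 * lpfc_mbx_unreg_vpi: issue an UNREG_VPI mailbox for this vport;
 * completion is handled by lpfc_mbx_cmpl_unreg_vpi() above.
 */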
1206 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1208 struct lpfc_hba *phba = vport->phba;
1212 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1216 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1217 mbox->vport = vport;
1218 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1219 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1220 if (rc == MBX_NOT_FINISHED) {
1221 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1222 "1800 Could not issue unreg_vpi\n");
1223 mempool_free(mbox, phba->mbox_mem_pool);
1224 vport->unreg_vpi_cmpl = VPORT_ERROR;
1229 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1231 struct lpfc_vport *vport = pmb->vport;
1232 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1233 MAILBOX_t *mb = &pmb->mb;
1235 switch (mb->mbxStatus) {
1239 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1240 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
1242 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1243 spin_lock_irq(shost->host_lock);
1244 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1245 spin_unlock_irq(shost->host_lock);
1246 vport->fc_myDID = 0;
1250 vport->num_disc_nodes = 0;
1251 /* go thru NPR list and issue ELS PLOGIs */
1252 if (vport->fc_npr_cnt)
1253 lpfc_els_disc_plogi(vport);
1255 if (!vport->num_disc_nodes) {
1256 spin_lock_irq(shost->host_lock);
1257 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1258 spin_unlock_irq(shost->host_lock);
1259 lpfc_can_disctmo(vport);
1261 vport->port_state = LPFC_VPORT_READY;
1264 mempool_free(pmb, phba->mbox_mem_pool);
1269 * This routine handles processing a Fabric REG_LOGIN mailbox
1270 * command upon completion. It is set up in the LPFC_MBOXQ
1271 * as the completion routine when the command is
1272 * handed off to the SLI layer.
1275 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1277 struct lpfc_vport *vport = pmb->vport;
1278 MAILBOX_t *mb = &pmb->mb;
1279 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1280 struct lpfc_nodelist *ndlp;
1281 struct lpfc_vport **vports;
1284 ndlp = (struct lpfc_nodelist *) pmb->context2;
1285 pmb->context1 = NULL;
1286 pmb->context2 = NULL;
1287 if (mb->mbxStatus) {
1288 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1290 mempool_free(pmb, phba->mbox_mem_pool);
1293 if (phba->fc_topology == TOPOLOGY_LOOP) {
1294 /* FLOGI failed, use loop map to make discovery list */
1295 lpfc_disc_list_loopmap(vport);
1297 /* Start discovery */
1298 lpfc_disc_start(vport);
1302 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1303 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1304 "0258 Register Fabric login error: 0x%x\n",
1309 ndlp->nlp_rpi = mb->un.varWords[0];
1310 ndlp->nlp_type |= NLP_FABRIC;
1311 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1313 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
1315 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1316 vports = lpfc_create_vport_work_array(phba);
1319 i < LPFC_MAX_VPORTS && vports[i] != NULL;
1321 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
1323 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1324 lpfc_initial_fdisc(vports[i]);
1325 else if (phba->sli3_options &
1326 LPFC_SLI3_NPIV_ENABLED) {
1327 lpfc_vport_set_state(vports[i],
1328 FC_VPORT_NO_FABRIC_SUPP);
1329 lpfc_printf_vlog(vport, KERN_ERR,
1332 "Fabric support\n");
1335 lpfc_destroy_vport_work_array(vports);
1336 lpfc_do_scr_ns_plogi(phba, vport);
1339 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1341 mempool_free(pmb, phba->mbox_mem_pool);
1346 * This routine handles processing a NameServer REG_LOGIN mailbox
1347 * command upon completion. It is set up in the LPFC_MBOXQ
1348 * as the completion routine when the command is
1349 * handed off to the SLI layer.
1352 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1354 MAILBOX_t *mb = &pmb->mb;
1355 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1356 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1357 struct lpfc_vport *vport = pmb->vport;
1359 if (mb->mbxStatus) {
1362 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1364 mempool_free(pmb, phba->mbox_mem_pool);
1365 lpfc_drop_node(vport, ndlp);
1367 if (phba->fc_topology == TOPOLOGY_LOOP) {
1369 * RegLogin failed, use loop map to make discovery
1372 lpfc_disc_list_loopmap(vport);
1374 /* Start discovery */
1375 lpfc_disc_start(vport);
1378 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1379 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1380 "0260 Register NameServer error: 0x%x\n",
1385 pmb->context1 = NULL;
1387 ndlp->nlp_rpi = mb->un.varWords[0];
1388 ndlp->nlp_type |= NLP_FABRIC;
1389 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1391 if (vport->port_state < LPFC_VPORT_READY) {
1392 /* Link up discovery requires Fabric registration. */
1393 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1394 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1395 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1396 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1397 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1399 /* Issue SCR just before NameServer GID_FT Query */
1400 lpfc_issue_els_scr(vport, SCR_DID, 0);
1403 vport->fc_ns_retry = 0;
1404 /* Good status, issue CT Request to NameServer */
1405 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1406 /* Cannot issue NameServer Query, so finish up discovery */
1411 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1413 mempool_free(pmb, phba->mbox_mem_pool);
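/*
 * lpfc_register_remote_port: register (or re-register) the node with the
 * FC transport, set its FCP roles and, when a usable SCSI target id is
 * assigned, record it in the node.
 */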
1419 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1421 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1422 struct fc_rport *rport;
1423 struct lpfc_rport_data *rdata;
1424 struct fc_rport_identifiers rport_ids;
1425 struct lpfc_hba *phba = vport->phba;
1427 /* Remote port has reappeared. Re-register w/ FC transport */
1428 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1429 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1430 rport_ids.port_id = ndlp->nlp_DID;
1431 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1434 * We leave our node pointer in rport->dd_data when we unregister a
1435 * FCP target port. But fc_remote_port_add zeros the space to which
1436 * rport->dd_data points. So, if we're reusing a previously
1437 * registered port, drop the reference that we took the last time we
1438 * registered the port.
1440 if (ndlp->rport && ndlp->rport->dd_data &&
1441 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1445 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1446 "rport add: did:x%x flg:x%x type x%x",
1447 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1449 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
1450 if (!rport || !get_device(&rport->dev)) {
1451 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1452 "Warning: fc_remote_port_add failed\n");
1456 /* initialize static port data */
1457 rport->maxframe_size = ndlp->nlp_maxframe;
1458 rport->supported_classes = ndlp->nlp_class_sup;
1459 rdata = rport->dd_data;
1460 rdata->pnode = lpfc_nlp_get(ndlp);
1462 if (ndlp->nlp_type & NLP_FCP_TARGET)
1463 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1464 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1465 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1468 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
1469 fc_remote_port_rolechg(rport, rport_ids.roles);
1471 if ((rport->scsi_target_id != -1) &&
1472 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1473 ndlp->nlp_sid = rport->scsi_target_id;
1479 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1481 struct fc_rport *rport = ndlp->rport;
1483 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1484 "rport delete: did:x%x flg:x%x type x%x",
1485 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1487 fc_remote_port_delete(rport);
1493 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
1495 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1497 spin_lock_irq(shost->host_lock);
1499 case NLP_STE_UNUSED_NODE:
1500 vport->fc_unused_cnt += count;
1502 case NLP_STE_PLOGI_ISSUE:
1503 vport->fc_plogi_cnt += count;
1505 case NLP_STE_ADISC_ISSUE:
1506 vport->fc_adisc_cnt += count;
1508 case NLP_STE_REG_LOGIN_ISSUE:
1509 vport->fc_reglogin_cnt += count;
1511 case NLP_STE_PRLI_ISSUE:
1512 vport->fc_prli_cnt += count;
1514 case NLP_STE_UNMAPPED_NODE:
1515 vport->fc_unmap_cnt += count;
1517 case NLP_STE_MAPPED_NODE:
1518 vport->fc_map_cnt += count;
1520 case NLP_STE_NPR_NODE:
1521 vport->fc_npr_cnt += count;
1524 spin_unlock_irq(shost->host_lock);
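/*
 * lpfc_nlp_state_cleanup: apply transport side effects of a node state
 * change: unregister the rport when leaving a mapped/unmapped state,
 * register it when entering one, and fall back to UNMAPPED if the rport
 * got no usable SCSI target id.
 */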
1528 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1529 int old_state, int new_state)
1531 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1533 if (new_state == NLP_STE_UNMAPPED_NODE) {
1534 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1535 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1536 ndlp->nlp_type |= NLP_FC_NODE;
1538 if (new_state == NLP_STE_MAPPED_NODE)
1539 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
1540 if (new_state == NLP_STE_NPR_NODE)
1541 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
1543 /* Transport interface */
1544 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1545 old_state == NLP_STE_UNMAPPED_NODE)) {
1546 vport->phba->nport_event_cnt++;
1547 lpfc_unregister_remote_port(ndlp);
1550 if (new_state == NLP_STE_MAPPED_NODE ||
1551 new_state == NLP_STE_UNMAPPED_NODE) {
1552 vport->phba->nport_event_cnt++;
1554 * Tell the fc transport about the port, if we haven't
1555 * already. If we have, and it's a scsi entity, be
1556 * sure to unblock any attached scsi devices
1558 lpfc_register_remote_port(vport, ndlp);
1561 * if we added to Mapped list, but the remote port
1562 * registration failed or assigned a target id outside
1563 * our presentable range - move the node to the
1566 if (new_state == NLP_STE_MAPPED_NODE &&
1568 ndlp->rport->scsi_target_id == -1 ||
1569 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
1570 spin_lock_irq(shost->host_lock);
1571 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1572 spin_unlock_irq(shost->host_lock);
1573 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1578 lpfc_nlp_state_name(char *buffer, size_t size, int state)
1580 static char *states[] = {
1581 [NLP_STE_UNUSED_NODE] = "UNUSED",
1582 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
1583 [NLP_STE_ADISC_ISSUE] = "ADISC",
1584 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
1585 [NLP_STE_PRLI_ISSUE] = "PRLI",
1586 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
1587 [NLP_STE_MAPPED_NODE] = "MAPPED",
1588 [NLP_STE_NPR_NODE] = "NPR",
1591 if (state < NLP_STE_MAX_STATE && states[state])
1592 strlcpy(buffer, states[state], size);
1594 snprintf(buffer, size, "unknown (%d)", state);
1599 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1602 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1603 int old_state = ndlp->nlp_state;
1604 char name1[16], name2[16];
1606 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1607 "0904 NPort state transition x%06x, %s -> %s\n",
1609 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1610 lpfc_nlp_state_name(name2, sizeof(name2), state));
1612 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1613 "node statechg did:x%x old:%d ste:%d",
1614 ndlp->nlp_DID, old_state, state);
1616 if (old_state == NLP_STE_NPR_NODE &&
1617 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1618 state != NLP_STE_NPR_NODE)
1619 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1620 if (old_state == NLP_STE_UNMAPPED_NODE) {
1621 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1622 ndlp->nlp_type &= ~NLP_FC_NODE;
1625 if (list_empty(&ndlp->nlp_listp)) {
1626 spin_lock_irq(shost->host_lock);
1627 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1628 spin_unlock_irq(shost->host_lock);
1629 } else if (old_state)
1630 lpfc_nlp_counters(vport, old_state, -1);
1632 ndlp->nlp_state = state;
1633 lpfc_nlp_counters(vport, state, 1);
1634 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
1638 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1640 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1642 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1643 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1644 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1645 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1646 spin_lock_irq(shost->host_lock);
1647 list_del_init(&ndlp->nlp_listp);
1648 spin_unlock_irq(shost->host_lock);
1649 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1650 NLP_STE_UNUSED_NODE);
1654 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1656 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1661 * Start / restart the rescue timer for Discovery / RSCN handling
1664 lpfc_set_disctmo(struct lpfc_vport *vport)
1666 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1667 struct lpfc_hba *phba = vport->phba;
1670 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
1671 /* For FAN, timeout should be greater than edtov */
1672 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1674 /* Normal discovery timeout should be greater than the ELS/CT timeout;
1675 * the FC spec states we need 3 * ratov for CT requests
1677 tmo = ((phba->fc_ratov * 3) + 3);
1681 if (!timer_pending(&vport->fc_disctmo)) {
1682 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1683 "set disc timer: tmo:x%x state:x%x flg:x%x",
1684 tmo, vport->port_state, vport->fc_flag);
1687 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1688 spin_lock_irq(shost->host_lock);
1689 vport->fc_flag |= FC_DISC_TMO;
1690 spin_unlock_irq(shost->host_lock);
1692 /* Start Discovery Timer state <hba_state> */
1693 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1694 "0247 Start Discovery Timer state x%x "
1695 "Data: x%x x%lx x%x x%x\n",
1696 vport->port_state, tmo,
1697 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1698 vport->fc_adisc_cnt);
1704 * Cancel rescue timer for Discovery / RSCN handling
1707 lpfc_can_disctmo(struct lpfc_vport *vport)
1709 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1710 unsigned long iflags;
1712 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1713 "can disc timer: state:x%x rtry:x%x flg:x%x",
1714 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1716 /* Turn off discovery timer if its running */
1717 if (vport->fc_flag & FC_DISC_TMO) {
1718 spin_lock_irqsave(shost->host_lock, iflags);
1719 vport->fc_flag &= ~FC_DISC_TMO;
1720 spin_unlock_irqrestore(shost->host_lock, iflags);
1721 del_timer_sync(&vport->fc_disctmo);
1722 spin_lock_irqsave(&vport->work_port_lock, iflags);
1723 vport->work_port_events &= ~WORKER_DISC_TMO;
1724 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
1727 /* Cancel Discovery Timer state <hba_state> */
1728 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1729 "0248 Cancel Discovery Timer state x%x "
1730 "Data: x%x x%x x%x\n",
1731 vport->port_state, vport->fc_flag,
1732 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
1737 * Check specified ring for outstanding IOCB on the SLI queue
1738 * Return true if iocb matches the specified nport
1741 lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1742 struct lpfc_sli_ring *pring,
1743 struct lpfc_iocbq *iocb,
1744 struct lpfc_nodelist *ndlp)
1746 struct lpfc_sli *psli = &phba->sli;
1747 IOCB_t *icmd = &iocb->iocb;
1748 struct lpfc_vport *vport = ndlp->vport;
1750 if (iocb->vport != vport)
1753 if (pring->ringno == LPFC_ELS_RING) {
1754 switch (icmd->ulpCommand) {
1755 case CMD_GEN_REQUEST64_CR:
1756 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1758 case CMD_ELS_REQUEST64_CR:
1759 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
1761 case CMD_XMIT_ELS_RSP64_CX:
1762 if (iocb->context1 == (uint8_t *) ndlp)
1765 } else if (pring->ringno == psli->extra_ring) {
1767 } else if (pring->ringno == psli->fcp_ring) {
1768 /* Skip match check if waiting to relogin to FCP target */
1769 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1770 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1773 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1776 } else if (pring->ringno == psli->next_ring) {
1783 * Free resources / clean up outstanding I/Os
1784 * associated with nlp_rpi in the LPFC_NODELIST entry.
1787 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1789 LIST_HEAD(completions);
1790 struct lpfc_sli *psli;
1791 struct lpfc_sli_ring *pring;
1792 struct lpfc_iocbq *iocb, *next_iocb;
1796 lpfc_fabric_abort_nport(ndlp);
1799 * Everything that matches on txcmplq will be returned
1800 * by firmware with a no rpi error.
1803 rpi = ndlp->nlp_rpi;
1805 /* Now process each ring */
1806 for (i = 0; i < psli->num_rings; i++) {
1807 pring = &psli->ring[i];
1809 spin_lock_irq(&phba->hbalock);
1810 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1813 * Check to see if iocb matches the nport we are
1816 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1818 /* It matches, so dequeue and call compl
1820 list_move_tail(&iocb->list,
1825 spin_unlock_irq(&phba->hbalock);
1829 while (!list_empty(&completions)) {
1830 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1831 list_del_init(&iocb->list);
1833 if (!iocb->iocb_cmpl)
1834 lpfc_sli_release_iocbq(phba, iocb);
1837 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1838 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1839 (iocb->iocb_cmpl)(phba, iocb, iocb);
1847 * Free rpi associated with LPFC_NODELIST entry.
1848 * This routine is called from lpfc_freenode(), when we are removing
1849 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1850 * LOGO that completes successfully, and we are waiting to PLOGI back
1851 * to the remote NPort. In addition, it is called after we receive
1852 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1853 * we are waiting to PLOGI back to the remote NPort.
1856 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1858 struct lpfc_hba *phba = vport->phba;
1862 if (ndlp->nlp_rpi) {
1863 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1865 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1866 mbox->vport = vport;
1867 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1869 if (rc == MBX_NOT_FINISHED)
1870 mempool_free(mbox, phba->mbox_mem_pool);
1872 lpfc_no_rpi(phba, ndlp);
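/*
 * lpfc_unreg_all_rpis: unregister every RPI logged in on this vport with
 * a single wildcard (0xffff) UNREG_LOGIN mailbox.
 */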
1880 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1882 struct lpfc_hba *phba = vport->phba;
1886 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1888 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1889 mbox->vport = vport;
1890 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1891 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1892 if (rc == MBX_NOT_FINISHED) {
1893 mempool_free(mbox, phba->mbox_mem_pool);
1899 lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1901 struct lpfc_hba *phba = vport->phba;
1905 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1907 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1908 mbox->vport = vport;
1909 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1910 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1911 if (rc == MBX_NOT_FINISHED) {
1912 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
1913 "1815 Could not issue "
1914 "unreg_did (default rpis)\n");
1915 mempool_free(mbox, phba->mbox_mem_pool);
1921 * Free resources associated with LPFC_NODELIST entry
1922 * so it can be freed.
1925 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1927 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1928 struct lpfc_hba *phba = vport->phba;
1929 LPFC_MBOXQ_t *mb, *nextmb;
1930 struct lpfc_dmabuf *mp;
1932 /* Cleanup node for NPort <nlp_DID> */
1933 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
1934 "0900 Cleanup node for NPort x%x "
1935 "Data: x%x x%x x%x\n",
1936 ndlp->nlp_DID, ndlp->nlp_flag,
1937 ndlp->nlp_state, ndlp->nlp_rpi);
1938 lpfc_dequeue_node(vport, ndlp);
1940 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1941 if ((mb = phba->sli.mbox_active)) {
1942 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1943 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1944 mb->context2 = NULL;
1945 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1949 spin_lock_irq(&phba->hbalock);
1950 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1951 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1952 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1953 mp = (struct lpfc_dmabuf *) (mb->context1);
1955 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1958 list_del(&mb->list);
1959 mempool_free(mb, phba->mbox_mem_pool);
1963 spin_unlock_irq(&phba->hbalock);
1965 lpfc_els_abort(phba,ndlp);
1966 spin_lock_irq(shost->host_lock);
1967 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1968 spin_unlock_irq(shost->host_lock);
1970 ndlp->nlp_last_elscmd = 0;
1971 del_timer_sync(&ndlp->nlp_delayfunc);
1973 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1974 list_del_init(&ndlp->els_retry_evt.evt_listp);
1975 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
1976 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1978 if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
1979 list_del_init(&ndlp->dev_loss_evt.evt_listp);
1980 complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
1983 lpfc_unreg_rpi(vport, ndlp);
1989 * Check to see if we can free the nlp back to the freelist.
1990 * If we are in the middle of using the nlp in the discovery state
1991 * machine, defer the free till we reach the end of the state machine.
1994 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1996 struct lpfc_rport_data *rdata;
1998 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1999 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2002 lpfc_cleanup_node(vport, ndlp);
2005 * We can get here with a non-NULL ndlp->rport because when we
2006 * unregister a rport we don't break the rport/node linkage. So if we
2007 * do, make sure we don't leave any dangling pointers behind.
2010 rdata = ndlp->rport->dd_data;
2011 rdata->pnode = NULL;
2017 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2020 D_ID mydid, ndlpdid, matchdid;
2022 if (did == Bcast_DID)
2025 if (ndlp->nlp_DID == 0) {
2029 /* First check for Direct match */
2030 if (ndlp->nlp_DID == did)
2033 /* Next check for an area/domain == 0 match */
2034 mydid.un.word = vport->fc_myDID;
2035 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
2039 matchdid.un.word = did;
2040 ndlpdid.un.word = ndlp->nlp_DID;
2041 if (matchdid.un.b.id == ndlpdid.un.b.id) {
2042 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
2043 (mydid.un.b.area == matchdid.un.b.area)) {
2044 if ((ndlpdid.un.b.domain == 0) &&
2045 (ndlpdid.un.b.area == 0)) {
2046 if (ndlpdid.un.b.id)
2052 matchdid.un.word = ndlp->nlp_DID;
2053 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
2054 (mydid.un.b.area == ndlpdid.un.b.area)) {
2055 if ((matchdid.un.b.domain == 0) &&
2056 (matchdid.un.b.area == 0)) {
2057 if (matchdid.un.b.id)
2065 /* Search for a nodelist entry */
2066 static struct lpfc_nodelist *
2067 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2069 struct lpfc_nodelist *ndlp;
2072 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2073 if (lpfc_matchdid(vport, ndlp, did)) {
2074 data1 = (((uint32_t) ndlp->nlp_state << 24) |
2075 ((uint32_t) ndlp->nlp_xri << 16) |
2076 ((uint32_t) ndlp->nlp_type << 8) |
2077 ((uint32_t) ndlp->nlp_rpi & 0xff));
2078 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2079 "0929 FIND node DID "
2080 "Data: x%p x%x x%x x%x\n",
2081 ndlp, ndlp->nlp_DID,
2082 ndlp->nlp_flag, data1);
2087 /* FIND node did <did> NOT FOUND */
2088 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2089 "0932 FIND node did x%x NOT FOUND.\n", did);
2093 struct lpfc_nodelist *
2094 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2096 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2097 struct lpfc_nodelist *ndlp;
2099 spin_lock_irq(shost->host_lock);
2100 ndlp = __lpfc_findnode_did(vport, did);
2101 spin_unlock_irq(shost->host_lock);
2105 struct lpfc_nodelist *
2106 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2108 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2109 struct lpfc_nodelist *ndlp;
2111 ndlp = lpfc_findnode_did(vport, did);
2113 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
2114 lpfc_rscn_payload_check(vport, did) == 0)
2116 ndlp = (struct lpfc_nodelist *)
2117 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
2120 lpfc_nlp_init(vport, ndlp, did);
2121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2122 spin_lock_irq(shost->host_lock);
2123 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2124 spin_unlock_irq(shost->host_lock);
2127 if (vport->fc_flag & FC_RSCN_MODE) {
2128 if (lpfc_rscn_payload_check(vport, did)) {
2129 spin_lock_irq(shost->host_lock);
2130 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2131 spin_unlock_irq(shost->host_lock);
2133 /* Since this node is marked for discovery,
2134 * delay timeout is not needed.
2136 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2137 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2141 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
2142 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
2144 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2145 spin_lock_irq(shost->host_lock);
2146 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2147 spin_unlock_irq(shost->host_lock);
2152 /* Build a list of nodes to discover based on the loopmap */
2154 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
2156 struct lpfc_hba *phba = vport->phba;
2158 uint32_t alpa, index;
2160 if (!lpfc_is_link_up(phba))
2163 if (phba->fc_topology != TOPOLOGY_LOOP)
2166 /* Check for loop map present or not */
2167 if (phba->alpa_map[0]) {
2168 for (j = 1; j <= phba->alpa_map[0]; j++) {
2169 alpa = phba->alpa_map[j];
2170 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
2172 lpfc_setup_disc_node(vport, alpa);
2175 /* No alpamap, so try all alpa's */
2176 for (j = 0; j < FC_MAXLOOP; j++) {
2177 /* If cfg_scan_down is set, start from highest
2178 * ALPA (0xef) to lowest (0x1).
2180 if (vport->cfg_scan_down)
2183 index = FC_MAXLOOP - j - 1;
2184 alpa = lpfcAlpaArray[index];
2185 if ((vport->fc_myDID & 0xff) == alpa)
2187 lpfc_setup_disc_node(vport, alpa);
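/*
 * lpfc_issue_clear_la: on the physical port, issue a CLEAR_LA mailbox so
 * firmware resumes link attention processing; on failure, flush the
 * discovery list and mark the HBA in error.
 */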
2194 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
2197 struct lpfc_sli *psli = &phba->sli;
2198 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2199 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2200 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2204 * If it's not a physical port or if we have already sent
2205 * clear_la, then don't send it.
2207 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2208 (vport->port_type != LPFC_PHYSICAL_PORT))
2211 /* Link up discovery */
2212 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2213 phba->link_state = LPFC_CLEAR_LA;
2214 lpfc_clear_la(phba, mbox);
2215 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2216 mbox->vport = vport;
2217 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2218 if (rc == MBX_NOT_FINISHED) {
2219 mempool_free(mbox, phba->mbox_mem_pool);
2220 lpfc_disc_flush_list(vport);
2221 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2222 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2223 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2224 phba->link_state = LPFC_HBA_ERROR;
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/*
	 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
	 * continue discovery.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_RSCN_MODE)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}

/*
 *  Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 *  ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq    *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
}

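/*
 * Walk the vport node list and free any queued ELS traffic for nodes
 * that still have a PLOGI or ADISC outstanding.
 */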
void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

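/*
 * Release all discovery resources held by a vport: pending RSCNs,
 * outstanding ELS commands and the discovery node list.
 */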
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba   *phba = vport->phba;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
		spin_lock_irqsave(&vport->work_port_lock, flags);
		vport->work_port_events |= WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		if (phba->work_wait)
			lpfc_worker_wake_up(phba);
		spin_unlock_irqrestore(&phba->hbalock, flags);
	}
	return;
}

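/*
 * Worker-thread side of the discovery timeout.  Based on the current
 * vport port_state and HBA link_state, this either restarts FLOGI,
 * retries the NameServer query, flushes discovery, or recovers from a
 * CLEAR_LA timeout.
 */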
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	LPFC_MBOXQ_t *initlinkmbox;
	int rc, clrlaerr = 0;

	if (!(vport->fc_flag & FC_DISC_TMO))
		return;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"disc timeout:    state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	switch (vport->port_state) {

	case LPFC_LOCAL_CFG_LINK:
	/* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
	 * FAN
	 */
				/* FAN timeout */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0221 FAN timeout\n");
		/* Start discovery by sending FLOGI, clean up old rpis */
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
				continue;
			if (ndlp->nlp_type & NLP_FABRIC) {
				/* Clean up the ndlp on Fabric connections */
				lpfc_drop_node(vport, ndlp);
			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
				/* Fail outstanding IO now since device
				 * is marked for PLOGI.
				 */
				lpfc_unreg_rpi(vport, ndlp);
			}
		}
		if (vport->port_state != LPFC_FLOGI) {
			vport->port_state = LPFC_FLOGI;
			lpfc_set_disctmo(vport);
			lpfc_initial_flogi(vport);
		}
		break;

	case LPFC_FDISC:
	case LPFC_FLOGI:
	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
		/* Initial FLOGI timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0222 Initial %s timeout\n",
				 vport->vpi ? "FDISC" : "FLOGI");

		/* Assume no Fabric and go on with discovery.
		 * Check for outstanding ELS FLOGI to abort.
		 */

		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_FABRIC_CFG_LINK:
	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
	   NameServer login */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0223 Timeout while waiting for "
				 "NameServer login\n");
		/* Next look for NameServer ndlp */
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		if (ndlp)
			lpfc_nlp_put(ndlp);
		/* Start discovery */
		lpfc_disc_start(vport);
		break;

	case LPFC_NS_QRY:
	/* Check for wait for NameServer Rsp timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0224 NameServer Query timeout "
				 "Data: x%x x%x\n",
				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
			/* Try it one more time */
			vport->fc_ns_retry++;
			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
					 vport->fc_ns_retry, 0);
			if (rc == 0)
				break;
		}
		vport->fc_ns_retry = 0;

		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
			lpfc_issue_reg_vpi(phba, vport);
		else {	/* NPIV Not enabled */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}

		/* Setup and issue mailbox INITIALIZE LINK command */
		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!initlinkmbox) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0206 Device Discovery "
					 "completion error\n");
			phba->link_state = LPFC_HBA_ERROR;
			break;
		}

		lpfc_linkdown(phba);
		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
			       phba->cfg_link_speed);
		initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
		initlinkmbox->vport = vport;
		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
		lpfc_set_loopback_flag(phba);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(initlinkmbox, phba->mbox_mem_pool);

		break;

	case LPFC_DISC_AUTH:
	/* Node Authentication timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0227 Node Authentication timeout\n");
		lpfc_disc_flush_list(vport);

		/*
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
			lpfc_issue_reg_vpi(phba, vport);
		else {	/* NPIV Not enabled */
			lpfc_issue_clear_la(phba, vport);
			vport->port_state = LPFC_VPORT_READY;
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0229 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
				/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_LINK_UP:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}

/*
 * This routine handles processing the REG_LOGIN mailbox command issued
 * for the FDMI (management server) port upon completion.  It is set up
 * in the LPFC_MBOXQ as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->mb;
	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport    *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port) or Delay issuing FDMI command if
	 * fdmi-on=2 (supporting RPA/hostname)
	 */
	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* Mailbox took a reference to the node */
	lpfc_nlp_put(ndlp);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

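/*
 * Node list filter helpers used with __lpfc_find_node(): match a node
 * by its RPI or by its port WWN.
 */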
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
	uint16_t *rpi = param;

	return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
	return memcmp(&ndlp->nlp_portname, param,
		      sizeof(ndlp->nlp_portname)) == 0;
}

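/*
 * Search the vport node list for a remote port matching the filter
 * criteria.  The caller must hold host_lock; lpfc_find_node() below is
 * the locking wrapper.
 */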
struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
		    filter(ndlp, param))
			return ndlp;
	}
	return NULL;
}

/*
 * Search node lists for a remote port matching filter criteria.
 * This routine takes host_lock itself; callers that already hold
 * host_lock should use __lpfc_find_node() instead.
 */
struct lpfc_nodelist *
lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
	struct Scsi_Host     *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, filter, param);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given RPI.  If the RPI
 * is found it returns the node list element pointer, else it returns
 * NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
	struct Scsi_Host     *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_findnode_rpi(vport, rpi);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

/*
 * This routine looks up the ndlp lists for the given WWPN.  If the WWPN
 * is found it returns the node list element pointer, else it returns
 * NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
	struct Scsi_Host     *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	spin_lock_irq(shost->host_lock);
	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
	spin_unlock_irq(shost->host_lock);
	return ndlp;
}

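/*
 * Initialize a freshly allocated ndlp: zero it, set up its delayed
 * retry timer and event lists, record the DID and owning vport, and
 * take the initial kref.
 */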
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
	init_timer(&ndlp->nlp_delayfunc);
	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
	ndlp->nlp_DID = did;
	ndlp->vport = vport;
	ndlp->nlp_sid = NLP_NO_SID;
	INIT_LIST_HEAD(&ndlp->nlp_listp);
	kref_init(&ndlp->kref);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node init:       did:x%x",
		ndlp->nlp_DID, 0, 0);

	return;
}

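/*
 * kref release handler for an ndlp.  Called when the last reference is
 * dropped; removes the node from the vport and returns it to the
 * mempool.
 */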
static void
lpfc_nlp_release(struct kref *kref)
{
	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
						  kref);

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node release:    did:x%x flg:x%x type:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	lpfc_nlp_remove(ndlp->vport, ndlp);
	mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}

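/*
 * Reference counting wrappers for ndlp objects.  lpfc_nlp_get() takes
 * a reference (if ndlp is non-NULL) and lpfc_nlp_put() drops one,
 * freeing the node via lpfc_nlp_release() when the count hits zero.
 */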
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
	if (ndlp)
		kref_get(&ndlp->kref);
	return ndlp;
}

int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
	return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}