/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsi id for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
        0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
        0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
        0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
        0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
        0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
        0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
        0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
        0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
        0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
        0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
        0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
        0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
        0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};
static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
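
/*
 * Called through the FC transport template when a remote port's I/O must
 * be terminated immediately (for example, vport removal or driver unload
 * before dev_loss_tmo fires): abort any outstanding FCP I/O to the target
 * and unblock it so commands fail back promptly.
 */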
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist * ndlp;
        struct lpfc_hba *phba;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;

        if (!ndlp) {
                if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
                        printk(KERN_ERR "Cannot find remote node"
                               " to terminate I/O Data x%x\n",
                               rport->port_id);
                return;
        }

        phba = ndlp->vport->phba;

        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
                "rport terminate: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        if (ndlp->nlp_sid != NLP_NO_SID) {
                lpfc_sli_abort_iocb(ndlp->vport,
                        &phba->sli.ring[phba->sli.fcp_ring],
                        ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }

        /*
         * A device is normally blocked for rediscovery and unblocked when
         * devloss timeout happens. In case a vport is removed or driver
         * unloaded before devloss timeout happens, we need to unblock here.
         */
        scsi_target_unblock(&rport->dev);
        return;
}
/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
        struct lpfc_rport_data *rdata;
        struct lpfc_nodelist * ndlp;
        struct lpfc_vport *vport;
        struct lpfc_hba   *phba;
        struct lpfc_work_evt *evtp;

        rdata = rport->dd_data;
        ndlp = rdata->pnode;

        if (!ndlp) {
                if (rport->scsi_target_id != -1) {
                        printk(KERN_ERR "Cannot find remote node"
                               " for rport in dev_loss_tmo_callbk x%x\n",
                               rport->port_id);
                }
                return;
        }

        vport = ndlp->vport;
        phba  = vport->phba;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosscb: sid:x%x did:x%x flg:x%x",
                ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

        evtp = &ndlp->dev_loss_evt;

        if (!list_empty(&evtp->evt_listp))
                return;

        spin_lock_irq(&phba->hbalock);
        evtp->evt_arg1 = ndlp;
        evtp->evt      = LPFC_EVT_DEV_LOSS;
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        if (phba->work_wait)
                wake_up(phba->work_wait);

        spin_unlock_irq(&phba->hbalock);

        return;
}
/*
 * This function is called from the worker thread when dev_loss_tmo
 * fires.
 */
static void
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
        struct lpfc_rport_data *rdata;
        struct fc_rport   *rport;
        struct lpfc_vport *vport;
        struct lpfc_hba   *phba;
        uint8_t *name;
        int put_node;
        int put_rport;
        int warn_on = 0;

        rport = ndlp->rport;

        if (!rport)
                return;

        rdata = rport->dd_data;
        name = (uint8_t *) &ndlp->nlp_portname;
        vport = ndlp->vport;
        phba  = vport->phba;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport devlosstmo:did:x%x type:x%x id:x%x",
                ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

        if (!(vport->load_flag & FC_UNLOADING) &&
            ndlp->nlp_state == NLP_STE_MAPPED_NODE)
                return;

        if (ndlp->nlp_type & NLP_FABRIC) {
                /* We will clean up these Nodes in linkup */
                put_node = rdata->pnode != NULL;
                put_rport = ndlp->rport != NULL;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
                if (put_node)
                        lpfc_nlp_put(ndlp);
                if (put_rport)
                        put_device(&rport->dev);
                return;
        }

        if (ndlp->nlp_sid != NLP_NO_SID) {
                warn_on = 1;
                /* flush the target */
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
        }
        if (vport->load_flag & FC_UNLOADING)
                warn_on = 0;

        if (warn_on) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                 "0203 Devloss timeout on "
                                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                 "NPort x%x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                                 "0204 Devloss timeout on "
                                 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
                                 "NPort x%x Data: x%x x%x x%x\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
                                 ndlp->nlp_state, ndlp->nlp_rpi);
        }

        put_node = rdata->pnode != NULL;
        put_rport = ndlp->rport != NULL;
        rdata->pnode = NULL;
        ndlp->rport = NULL;
        if (put_node)
                lpfc_nlp_put(ndlp);
        if (put_rport)
                put_device(&rport->dev);

        if (!(vport->load_flag & FC_UNLOADING) &&
            !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
}
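
/*
 * Wake the worker thread. Callers queue work while holding hbalock, so a
 * wakeup is never issued against a stale phba->work_wait pointer.
 */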
void
lpfc_worker_wake_up(struct lpfc_hba *phba)
{
        wake_up(phba->work_wait);
        return;
}
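
/*
 * Drain phba->work_list and dispatch each queued event. hbalock is
 * dropped around each handler because handlers may sleep or post more
 * work; events embedded in an ndlp are not kfree'd here.
 */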
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
        struct lpfc_work_evt  *evtp = NULL;
        struct lpfc_nodelist  *ndlp;
        int free_evt;

        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
                        lpfc_els_retry_delay_handler(ndlp);
                        free_evt = 0; /* evt is part of ndlp */
                        break;
                case LPFC_EVT_DEV_LOSS:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
                        lpfc_nlp_get(ndlp);
                        lpfc_dev_loss_tmo_handler(ndlp);
                        free_evt = 0;
                        lpfc_nlp_put(ndlp);
                        break;
                case LPFC_EVT_ONLINE:
                        if (phba->link_state < LPFC_LINK_DOWN)
                                *(int *) (evtp->evt_arg1) = lpfc_online(phba);
                        else
                                *(int *) (evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE_PREP:
                        if (phba->link_state >= LPFC_LINK_DOWN)
                                lpfc_offline_prep(phba);
                        *(int *)(evtp->evt_arg1) = 0;
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_OFFLINE:
                        lpfc_offline(phba);
                        lpfc_sli_brdrestart(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_WARM_START:
                        lpfc_offline(phba);
                        lpfc_reset_barrier(phba);
                        lpfc_sli_brdreset(phba);
                        lpfc_hba_down_post(phba);
                        *(int *)(evtp->evt_arg1) =
                                lpfc_sli_brdready(phba, HS_MBRDY);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                case LPFC_EVT_KILL:
                        lpfc_offline(phba);
                        *(int *)(evtp->evt_arg1)
                                = (phba->pport->stopped)
                                        ? 0 : lpfc_sli_brdkill(phba);
                        lpfc_unblock_mgmt_io(phba);
                        complete((struct completion *)(evtp->evt_arg2));
                        break;
                }
                if (free_evt)
                        kfree(evtp);
                spin_lock_irq(&phba->hbalock);
        }
        spin_unlock_irq(&phba->hbalock);

}
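
/*
 * Core of the worker thread: service latched host attention conditions
 * (error, mailbox, link), run per-vport timer and event handlers,
 * process deferred slow (ELS) ring events, then complete queued work.
 */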
static void
lpfc_work_done(struct lpfc_hba *phba)
{
        struct lpfc_sli_ring *pring;
        uint32_t ha_copy, status, control, work_port_events;
        struct lpfc_vport **vports;
        struct lpfc_vport *vport;
        int i;

        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);

        if (ha_copy & HA_ERATT)
                lpfc_handle_eratt(phba);

        if (ha_copy & HA_MBATT)
                lpfc_sli_handle_mb_event(phba);

        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for(i = 0; i < LPFC_MAX_VPORTS; i++) {
                        /*
                         * We could have no vports in array if unloading, so if
                         * this happens then just use the pport
                         */
                        if (vports[i] == NULL && i == 0)
                                vport = phba->pport;
                        else
                                vport = vports[i];
                        if (vport == NULL)
                                break;
                        work_port_events = vport->work_port_events;
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
                                lpfc_els_timeout_handler(vport);
                        if (work_port_events & WORKER_HB_TMO)
                                lpfc_hb_timeout_handler(phba);
                        if (work_port_events & WORKER_MBOX_TMO)
                                lpfc_mbox_timeout_handler(phba);
                        if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
                                lpfc_unblock_fabric_iocbs(phba);
                        if (work_port_events & WORKER_FDMI_TMO)
                                lpfc_fdmi_timeout_handler(vport);
                        if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
                                lpfc_ramp_down_queue_handler(phba);
                        if (work_port_events & WORKER_RAMP_UP_QUEUE)
                                lpfc_ramp_up_queue_handler(phba);
                        spin_lock_irq(&vport->work_port_lock);
                        vport->work_port_events &= ~work_port_events;
                        spin_unlock_irq(&vport->work_port_lock);
                }
        lpfc_destroy_vport_work_array(vports);

        pring = &phba->sli.ring[LPFC_ELS_RING];
        status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
        status >>= (4*LPFC_ELS_RING);
        if ((status & HA_RXMASK)
            || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
                if (pring->flag & LPFC_STOP_IOCB_EVENT) {
                        pring->flag |= LPFC_DEFERRED_RING_EVENT;
                } else {
                        lpfc_sli_handle_slow_ring_event(phba, pring,
                                                        (status &
                                                         HA_RXMASK));
                        pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
                }
                /*
                 * Turn on Ring interrupts
                 */
                spin_lock_irq(&phba->hbalock);
                control = readl(phba->HCregaddr);
                if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
                        lpfc_debugfs_slow_ring_trc(phba,
                                "WRK Enable ring: cntl:x%x hacopy:x%x",
                                control, ha_copy, 0);

                        control |= (HC_R0INT_ENA << LPFC_ELS_RING);
                        writel(control, phba->HCregaddr);
                        readl(phba->HCregaddr); /* flush */
                } else {
                        lpfc_debugfs_slow_ring_trc(phba,
                                "WRK Ring ok:     cntl:x%x hacopy:x%x",
                                control, ha_copy, 0);
                }
                spin_unlock_irq(&phba->hbalock);
        }
        lpfc_work_list_done(phba);
}
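
/*
 * Wait condition for the worker thread: returns nonzero when any vport
 * has pending events, host attention is latched, work is queued, a ring
 * event was deferred, or the thread is being stopped.
 */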
static int
check_work_wait_done(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
        int rc = 0;

        spin_lock_irq(&phba->hbalock);
        list_for_each_entry(vport, &phba->port_list, listentry) {
                if (vport->work_port_events) {
                        rc = 1;
                        break;
                }
        }
        if (rc || phba->work_ha || (!list_empty(&phba->work_list)) ||
            kthread_should_stop() || pring->flag & LPFC_DEFERRED_RING_EVENT) {
                rc = 1;
                phba->work_found++;
        } else
                phba->work_found = 0;
        spin_unlock_irq(&phba->hbalock);
        return rc;
}
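
/* Worker thread entry point (started with kthread_run() from the HBA
 * initialization path). */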
static int
lpfc_do_work(void *p)
{
        struct lpfc_hba *phba = p;
        int rc;
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(work_waitq);

        set_user_nice(current, -20);
        phba->work_wait = &work_waitq;
        phba->work_found = 0;

        while (1) {

                rc = wait_event_interruptible(work_waitq,
                                              check_work_wait_done(phba));

                BUG_ON(rc);

                if (kthread_should_stop())
                        break;

                lpfc_work_done(phba);

                /* If there is a lot of slow ring work, like during link up
                 * check_work_wait_done() may cause this thread to not give
                 * up the CPU for very long periods of time. This may cause
                 * soft lockups or other problems. To avoid these situations
                 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
                 * consecutive iterations.
                 */
                if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
                        phba->work_found = 0;
                        schedule();
                }
        }
        phba->work_wait = NULL;
        return 0;
}
/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
                      uint32_t evt)
{
        struct lpfc_work_evt  *evtp;
        unsigned long flags;

        /*
         * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
         * be queued to worker thread for processing
         */
        evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
        if (!evtp)
                return 0;

        evtp->evt_arg1  = arg1;
        evtp->evt_arg2  = arg2;
        evtp->evt       = evt;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_add_tail(&evtp->evt_listp, &phba->work_list);
        if (phba->work_wait)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        return 1;
}
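
/*
 * On link failure, walk the vport's node list: unregister RPIs where the
 * hardware requires it and run each node through DEVICE_RM or
 * DEVICE_RECOVERY depending on the 'remove' flag.
 */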
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
        struct lpfc_hba  *phba = vport->phba;
        struct lpfc_nodelist *ndlp, *next_ndlp;
        int rc;

        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;

                if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
                    ((vport->port_type == LPFC_NPIV_PORT) &&
                     (ndlp->nlp_DID == NameServer_DID)))
                        lpfc_unreg_rpi(vport, ndlp);

                /* Leave Fabric nodes alone on link down */
                if (!remove && ndlp->nlp_type & NLP_FABRIC)
                        continue;
                rc = lpfc_disc_state_machine(vport, ndlp, NULL,
                                             remove
                                             ? NLP_EVT_DEVICE_RM
                                             : NLP_EVT_DEVICE_RECOVERY);
        }
        if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
                lpfc_mbx_unreg_vpi(vport);
                vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
        }
}
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
        /* Cleanup any outstanding RSCN activity */
        lpfc_els_flush_rscn(vport);

        /* Cleanup any outstanding ELS commands */
        lpfc_els_flush_cmd(vport);

        lpfc_cleanup_rpis(vport, 0);

        /* Turn off discovery timer if it's running */
        lpfc_can_disctmo(vport);
}
static void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Down:       state:x%x rtry:x%x flg:x%x",
                vport->port_state, vport->fc_ns_retry, vport->fc_flag);

        lpfc_port_link_failure(vport);
}
int
lpfc_linkdown(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t      *mb;
        int i;

        if (phba->link_state == LPFC_LINK_DOWN) {
                return 0;
        }
        spin_lock_irq(&phba->hbalock);
        if (phba->link_state > LPFC_LINK_DOWN) {
                phba->link_state = LPFC_LINK_DOWN;
                phba->pport->fc_flag &= ~FC_LBIT;
        }
        spin_unlock_irq(&phba->hbalock);
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
                        /* Issue a LINK DOWN event to all nodes */
                        lpfc_linkdown_port(vports[i]);
                }
        lpfc_destroy_vport_work_array(vports);
        /* Clean up any firmware default rpi's */
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mb) {
                lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
                mb->vport = vport;
                mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                    == MBX_NOT_FINISHED) {
                        mempool_free(mb, phba->mbox_mem_pool);
                }
        }

        /* Setup myDID for link up if we are in pt2pt mode */
        if (phba->pport->fc_flag & FC_PT2PT) {
                phba->pport->fc_myDID = 0;
                mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mb) {
                        lpfc_config_link(phba, mb);
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        mb->vport = vport;
                        if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
                            == MBX_NOT_FINISHED) {
                                mempool_free(mb, phba->mbox_mem_pool);
                        }
                }
                spin_lock_irq(shost->host_lock);
                phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
                spin_unlock_irq(shost->host_lock);
        }

        return 0;
}
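
/*
 * At link-up, reset fabric nodes to NPR so they are rediscovered, and
 * fail any I/O still pending against nodes that will have to PLOGI again.
 */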
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
        struct lpfc_nodelist *ndlp;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
                        continue;

                if (ndlp->nlp_type & NLP_FABRIC) {
                        /* On Linkup its safe to clean up the ndlp
                         * from Fabric connections.
                         */
                        if (ndlp->nlp_DID != Fabric_DID)
                                lpfc_unreg_rpi(vport, ndlp);
                        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                        /* Fail outstanding IO now since device is
                         * marked for PLOGI.
                         */
                        lpfc_unreg_rpi(vport, ndlp);
                }
        }
}
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;

        if ((vport->load_flag & FC_UNLOADING) != 0)
                return;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Link Up:         top:x%x speed:x%x flg:x%x",
                phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

        /* If NPIV is not enabled, only bring the physical port up */
        if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            (vport != phba->pport))
                return;

        fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
        vport->fc_flag |= FC_NDISC_ACTIVE;
        vport->fc_ns_retry = 0;
        spin_unlock_irq(shost->host_lock);

        if (vport->fc_flag & FC_LBIT)
                lpfc_linkup_cleanup_nodes(vport);
}
static int
lpfc_linkup(struct lpfc_hba *phba)
{
        struct lpfc_vport **vports;
        int i;

        phba->link_state = LPFC_LINK_UP;

        /* Unblock fabric iocbs if they are blocked */
        clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
        del_timer_sync(&phba->fabric_block_timer);

        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(vports);
        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                lpfc_issue_clear_la(phba, phba->pport);

        return 0;
}
/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_sli   *psli = &phba->sli;
        MAILBOX_t *mb = &pmb->mb;
        uint32_t control;

        /* Since we don't do discovery right now, turn these off here */
        psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

        /* Check for error */
        if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
                /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0320 CLEAR_LA mbxStatus error x%x hba "
                                 "state x%x\n",
                                 mb->mbxStatus, vport->port_state);
                phba->link_state = LPFC_HBA_ERROR;
                goto out;
        }

        if (vport->port_type == LPFC_PHYSICAL_PORT)
                phba->link_state = LPFC_HBA_READY;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
        return;

        vport->num_disc_nodes = 0;
        /* go thru NPR nodes and issue ELS PLOGIs */
        if (vport->fc_npr_cnt)
                lpfc_els_disc_plogi(vport);

        if (!vport->num_disc_nodes) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~FC_NDISC_ACTIVE;
                spin_unlock_irq(shost->host_lock);
        }

        vport->port_state = LPFC_VPORT_READY;

out:
        /* Device Discovery completes */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0225 Device Discovery completes\n");
        mempool_free(pmb, phba->mbox_mem_pool);

        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
        spin_unlock_irq(shost->host_lock);

        del_timer_sync(&phba->fc_estabtmo);

        lpfc_can_disctmo(vport);

        /* turn on Link Attention interrupts */

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);

        return;
}
static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;

        if (pmb->mb.mbxStatus)
                goto out;

        mempool_free(pmb, phba->mbox_mem_pool);

        if (phba->fc_topology == TOPOLOGY_LOOP &&
            vport->fc_flag & FC_PUBLIC_LOOP &&
            !(vport->fc_flag & FC_LBIT)) {
                /* Need to wait for FAN - use discovery timer
                 * for timeout.  port_state is identically
                 * LPFC_LOCAL_CFG_LINK while waiting for FAN
                 */
                lpfc_set_disctmo(vport);
                return;
        }

        /* Start discovery by sending a FLOGI. port_state is identically
         * LPFC_FLOGI while waiting for FLOGI cmpl
         */
        if (vport->port_state != LPFC_FLOGI) {
                lpfc_initial_flogi(vport);
        }
        return;

out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "0306 CONFIG_LINK mbxStatus error x%x "
                         "HBA state x%x\n",
                         pmb->mb.mbxStatus, vport->port_state);
        mempool_free(pmb, phba->mbox_mem_pool);

        lpfc_linkdown(phba);

        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                         "0200 CONFIG_LINK bad hba state x%x\n",
                         vport->port_state);

        lpfc_issue_clear_la(phba, vport);
        return;
}
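
/*
 * Completion handler for READ_SPARAM: copy the service parameters into
 * the vport, apply any soft WWNN/WWPN overrides, and record the physical
 * port's world-wide names.
 */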
static void
lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
        struct lpfc_vport  *vport = pmb->vport;

        /* Check for error */
        if (mb->mbxStatus) {
                /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0319 READ_SPARAM mbxStatus error x%x "
                                 "hba state x%x\n",
                                 mb->mbxStatus, vport->port_state);
                lpfc_linkdown(phba);
                goto out;
        }

        memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
               sizeof (struct serv_parm));
        if (phba->cfg_soft_wwnn)
                u64_to_wwn(phba->cfg_soft_wwnn,
                           vport->fc_sparam.nodeName.u.wwn);
        if (phba->cfg_soft_wwpn)
                u64_to_wwn(phba->cfg_soft_wwpn,
                           vport->fc_sparam.portName.u.wwn);
        memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
               sizeof(vport->fc_nodename));
        memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
               sizeof(vport->fc_portname));
        if (vport->port_type == LPFC_PHYSICAL_PORT) {
                memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
                memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
        }

        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;

out:
        pmb->context1 = NULL;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        lpfc_issue_clear_la(phba, vport);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}
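
/*
 * Handle a link-up attention: latch link speed and topology, derive the
 * local DID (from the granted AL_PA on loop, or the preferred DID
 * otherwise), then issue READ_SPARAM and CONFIG_LINK to continue
 * bringing the link up.
 */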
static void
lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
{
        struct lpfc_vport *vport = phba->pport;
        LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
        int i;
        struct lpfc_dmabuf *mp;
        int rc;

        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

        spin_lock_irq(&phba->hbalock);
        switch (la->UlnkSpeed) {
        case LA_1GHZ_LINK:
                phba->fc_linkspeed = LA_1GHZ_LINK;
                break;
        case LA_2GHZ_LINK:
                phba->fc_linkspeed = LA_2GHZ_LINK;
                break;
        case LA_4GHZ_LINK:
                phba->fc_linkspeed = LA_4GHZ_LINK;
                break;
        case LA_8GHZ_LINK:
                phba->fc_linkspeed = LA_8GHZ_LINK;
                break;
        default:
                phba->fc_linkspeed = LA_UNKNW_LINK;
                break;
        }

        phba->fc_topology = la->topology;
        phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;

        if (phba->fc_topology == TOPOLOGY_LOOP) {
                phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

                /* Get Loop Map information */
                if (la->il)
                        vport->fc_flag |= FC_LBIT;

                vport->fc_myDID = la->granted_AL_PA;
                i = la->un.lilpBde64.tus.f.bdeSize;

                if (i == 0) {
                        phba->alpa_map[0] = 0;
                } else {
                        if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
                                int numalpa, j, k;
                                union {
                                        uint8_t pamap[16];
                                        struct {
                                                uint32_t wd1;
                                                uint32_t wd2;
                                                uint32_t wd3;
                                                uint32_t wd4;
                                        } pa;
                                } un;
                                numalpa = phba->alpa_map[0];
                                j = 0;
                                while (j < numalpa) {
                                        memset(un.pamap, 0, 16);
                                        for (k = 1; j < numalpa; k++) {
                                                un.pamap[k - 1] =
                                                        phba->alpa_map[j + 1];
                                                j++;
                                                if (k == 16)
                                                        break;
                                        }
                                        /* Link Up Event ALPA map */
                                        lpfc_printf_log(phba,
                                                        KERN_WARNING,
                                                        LOG_LINK_EVENT,
                                                        "1304 Link Up Event "
                                                        "ALPA map Data: x%x "
                                                        "x%x x%x x%x\n",
                                                        un.pa.wd1, un.pa.wd2,
                                                        un.pa.wd3, un.pa.wd4);
                                }
                        }
                }
        } else {
                if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
                        if (phba->max_vpi && phba->cfg_enable_npiv &&
                            (phba->sli_rev == 3))
                                phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
                }
                vport->fc_myDID = phba->fc_pref_DID;
                vport->fc_flag |= FC_LBIT;
        }
        spin_unlock_irq(&phba->hbalock);

        lpfc_linkup(phba);
        if (sparam_mbox) {
                lpfc_read_sparam(phba, sparam_mbox, 0);
                sparam_mbox->vport = vport;
                sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
                rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                        mempool_free(sparam_mbox, phba->mbox_mem_pool);
                        if (cfglink_mbox)
                                mempool_free(cfglink_mbox, phba->mbox_mem_pool);
                        goto out;
                }
        }

        if (cfglink_mbox) {
                vport->port_state = LPFC_LOCAL_CFG_LINK;
                lpfc_config_link(phba, cfglink_mbox);
                cfglink_mbox->vport = vport;
                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
                if (rc != MBX_NOT_FINISHED)
                        return;
                mempool_free(cfglink_mbox, phba->mbox_mem_pool);
        }
out:
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
                         vport->port_state, sparam_mbox, cfglink_mbox);
        lpfc_issue_clear_la(phba, vport);
        return;
}
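
/*
 * Drive the link-down path from a mailbox completion and re-enable link
 * attention interrupts; no CLEAR_LA is needed in this case.
 */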
static void
lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
{
        uint32_t control;
        struct lpfc_sli *psli = &phba->sli;

        lpfc_linkdown(phba);

        /* turn on Link Attention interrupts - no CLEAR_LA needed */
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
        control = readl(phba->HCregaddr);
        control |= HC_LAINT_ENA;
        writel(control, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        spin_unlock_irq(&phba->hbalock);
}
/*
 * This routine handles processing a READ_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        READ_LA_VAR *la;
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);

        /* Check for error */
        if (mb->mbxStatus) {
                lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                "1307 READ_LA mbox error x%x state x%x\n",
                                mb->mbxStatus, vport->port_state);
                lpfc_mbx_issue_link_down(phba);
                phba->link_state = LPFC_HBA_ERROR;
                goto lpfc_mbx_cmpl_read_la_free_mbuf;
        }

        la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;

        memcpy(&phba->alpa_map[0], mp->virt, 128);

        spin_lock_irq(shost->host_lock);
        if (la->pb)
                vport->fc_flag |= FC_BYPASSED_MODE;
        else
                vport->fc_flag &= ~FC_BYPASSED_MODE;
        spin_unlock_irq(shost->host_lock);

        if (((phba->fc_eventTag + 1) < la->eventTag) ||
            (phba->fc_eventTag == la->eventTag)) {
                phba->fc_stat.LinkMultiEvent++;
                if (la->attType == AT_LINK_UP)
                        if (phba->fc_eventTag != 0)
                                lpfc_linkdown(phba);
        }

        phba->fc_eventTag = la->eventTag;

        if (la->attType == AT_LINK_UP) {
                phba->fc_stat.LinkUp++;
                if (phba->link_flag & LS_LOOPBACK_MODE) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
                                        "1306 Link Up Event in loop back mode "
                                        "x%x received Data: x%x x%x x%x x%x\n",
                                        la->eventTag, phba->fc_eventTag,
                                        la->granted_AL_PA, la->UlnkSpeed,
                                        phba->alpa_map[0]);
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                        "1303 Link Up Event x%x received "
                                        "Data: x%x x%x x%x x%x\n",
                                        la->eventTag, phba->fc_eventTag,
                                        la->granted_AL_PA, la->UlnkSpeed,
                                        phba->alpa_map[0]);
                }
                lpfc_mbx_process_link_up(phba, la);
        } else {
                phba->fc_stat.LinkDown++;
                lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
                                "1305 Link Down Event x%x received "
                                "Data: x%x x%x x%x\n",
                                la->eventTag, phba->fc_eventTag,
                                phba->pport->port_state, vport->fc_flag);
                lpfc_mbx_issue_link_down(phba);
        }

lpfc_mbx_cmpl_read_la_free_mbuf:
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport  *vport = pmb->vport;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;

        pmb->context1 = NULL;

        /* Good status, call state machine */
        lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        lpfc_nlp_put(ndlp);

        return;
}
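
/*
 * Completion handler for UNREG_VPI: benign mailbox statuses are only
 * logged, and the vport is then marked unregistered.
 */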
static void
lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);

        switch (mb->mbxStatus) {
        case 0x0011:    /* Unsupported feature */
        case 0x9700:    /* vpi not exist */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                                 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
                                 mb->mbxStatus);
                break;
        }
        vport->unreg_vpi_cmpl = VPORT_OK;
        mempool_free(pmb, phba->mbox_mem_pool);
        /*
         * This shost reference might have been taken at the beginning of
         * lpfc_vport_delete()
         */
        if (vport->load_flag & FC_UNLOADING)
                scsi_host_put(shost);
}
void
lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return;

        lpfc_unreg_vpi(phba, vport->vpi, mbox);
        mbox->vport = vport;
        mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                 "1800 Could not issue unreg_vpi\n");
                mempool_free(mbox, phba->mbox_mem_pool);
                vport->unreg_vpi_cmpl = VPORT_ERROR;
        }
}
static void
lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        MAILBOX_t *mb = &pmb->mb;

        switch (mb->mbxStatus) {
        case 0x0011:    /* Unsupported feature */
        case 0x9601:    /* Link event since CLEAR_LA */
        case 0x9602:    /* Local memory parity failure */
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                                 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
                                 mb->mbxStatus);
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
                spin_unlock_irq(shost->host_lock);
                vport->fc_myDID = 0;
                goto out;
        }

        vport->num_disc_nodes = 0;
        /* go thru NPR list and issue ELS PLOGIs */
        if (vport->fc_npr_cnt)
                lpfc_els_disc_plogi(vport);

        if (!vport->num_disc_nodes) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~FC_NDISC_ACTIVE;
                spin_unlock_irq(shost->host_lock);
                lpfc_can_disctmo(vport);
        }
        vport->port_state = LPFC_VPORT_READY;

out:
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_vport *vport = pmb->vport;
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp;
        struct lpfc_vport **vports;
        int i;

        ndlp = (struct lpfc_nodelist *) pmb->context2;
        pmb->context1 = NULL;
        pmb->context2 = NULL;
        if (mb->mbxStatus) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);
                lpfc_nlp_put(ndlp);

                if (phba->fc_topology == TOPOLOGY_LOOP) {
                        /* FLOGI failed, use loop map to make discovery list */
                        lpfc_disc_list_loopmap(vport);

                        /* Start discovery */
                        lpfc_disc_start(vport);
                        return;
                }

                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                                 "0258 Register Fabric login error: 0x%x\n",
                                 mb->mbxStatus);
                return;
        }

        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        lpfc_nlp_put(ndlp);     /* Drop the reference from the mbox */

        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
                        for(i = 0;
                            i < LPFC_MAX_VPORTS && vports[i] != NULL;
                            i++) {
                                if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
                                        continue;
                                if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
                                        lpfc_initial_fdisc(vports[i]);
                                else if (phba->sli3_options &
                                                LPFC_SLI3_NPIV_ENABLED) {
                                        lpfc_vport_set_state(vports[i],
                                                FC_VPORT_NO_FABRIC_SUPP);
                                        lpfc_printf_vlog(vport, KERN_ERR,
                                                         LOG_ELS,
                                                         "0259 No NPIV "
                                                         "Fabric support\n");
                                }
                        }
                lpfc_destroy_vport_work_array(vports);
                lpfc_do_scr_ns_plogi(phba, vport);
        }

        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        MAILBOX_t *mb = &pmb->mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        struct lpfc_vport *vport = pmb->vport;

        if (mb->mbxStatus) {
out:
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);

                /* If no other thread is using the ndlp, free it */
                lpfc_nlp_not_used(ndlp);

                if (phba->fc_topology == TOPOLOGY_LOOP) {
                        /*
                         * RegLogin failed, use loop map to make discovery
                         * list
                         */
                        lpfc_disc_list_loopmap(vport);

                        /* Start discovery */
                        lpfc_disc_start(vport);
                        return;
                }
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "0260 Register NameServer error: 0x%x\n",
                                 mb->mbxStatus);
                return;
        }

        pmb->context1 = NULL;

        ndlp->nlp_rpi = mb->un.varWords[0];
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

        if (vport->port_state < LPFC_VPORT_READY) {
                /* Link up discovery requires Fabric registration. */
                lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
                lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
                lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);

                /* Issue SCR just before NameServer GID_FT Query */
                lpfc_issue_els_scr(vport, SCR_DID, 0);
        }

        vport->fc_ns_retry = 0;
        /* Good status, issue CT Request to NameServer */
        if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
                /* Cannot issue NameServer Query, so finish up discovery */
                goto out;
        }

        lpfc_nlp_put(ndlp);
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
        mempool_free(pmb, phba->mbox_mem_pool);

        return;
}
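
/*
 * Register (or re-register) an ndlp with the FC transport as a remote
 * port, establishing the rport/ndlp linkage and the transport roles.
 */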
static void
lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct fc_rport  *rport;
        struct lpfc_rport_data *rdata;
        struct fc_rport_identifiers rport_ids;
        struct lpfc_hba  *phba = vport->phba;

        /* Remote port has reappeared. Re-register w/ FC transport */
        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
        rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rport_ids.port_id = ndlp->nlp_DID;
        rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

        /*
         * We leave our node pointer in rport->dd_data when we unregister a
         * FCP target port.  But fc_remote_port_add zeros the space to which
         * rport->dd_data points.  So, if we're reusing a previously
         * registered port, drop the reference that we took the last time we
         * registered the port.
         */
        if (ndlp->rport && ndlp->rport->dd_data &&
            ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
                lpfc_nlp_put(ndlp);
        }

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
                "rport add:       did:x%x flg:x%x type x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

        ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
        if (!rport || !get_device(&rport->dev)) {
                dev_printk(KERN_WARNING, &phba->pcidev->dev,
                           "Warning: fc_remote_port_add failed\n");
                return;
        }

        /* initialize static port data */
        rport->maxframe_size = ndlp->nlp_maxframe;
        rport->supported_classes = ndlp->nlp_class_sup;
        rdata = rport->dd_data;
        rdata->pnode = lpfc_nlp_get(ndlp);

        if (ndlp->nlp_type & NLP_FCP_TARGET)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
        if (ndlp->nlp_type & NLP_FCP_INITIATOR)
                rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

        if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
                fc_remote_port_rolechg(rport, rport_ids.roles);

        if ((rport->scsi_target_id != -1) &&
            (rport->scsi_target_id < LPFC_MAX_TARGET)) {
                ndlp->nlp_sid = rport->scsi_target_id;
        }
        return;
}
static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
        struct fc_rport *rport = ndlp->rport;

        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
                "rport delete:    did:x%x flg:x%x type x%x",
                ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

        fc_remote_port_delete(rport);

        return;
}
static void
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        spin_lock_irq(shost->host_lock);
        switch (state) {
        case NLP_STE_UNUSED_NODE:
                vport->fc_unused_cnt += count;
                break;
        case NLP_STE_PLOGI_ISSUE:
                vport->fc_plogi_cnt += count;
                break;
        case NLP_STE_ADISC_ISSUE:
                vport->fc_adisc_cnt += count;
                break;
        case NLP_STE_REG_LOGIN_ISSUE:
                vport->fc_reglogin_cnt += count;
                break;
        case NLP_STE_PRLI_ISSUE:
                vport->fc_prli_cnt += count;
                break;
        case NLP_STE_UNMAPPED_NODE:
                vport->fc_unmap_cnt += count;
                break;
        case NLP_STE_MAPPED_NODE:
                vport->fc_map_cnt += count;
                break;
        case NLP_STE_NPR_NODE:
                vport->fc_npr_cnt += count;
                break;
        }
        spin_unlock_irq(shost->host_lock);
}
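
/*
 * Apply the side effects of a node state change: fix up node type and
 * flags, and register with or unregister from the FC transport as the
 * node moves in and out of the mapped/unmapped states.
 */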
static void
lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                       int old_state, int new_state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if (new_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
                ndlp->nlp_type |= NLP_FC_NODE;
        }
        if (new_state == NLP_STE_MAPPED_NODE)
                ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
        if (new_state == NLP_STE_NPR_NODE)
                ndlp->nlp_flag &= ~NLP_RCV_PLOGI;

        /* Transport interface */
        if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
                            old_state == NLP_STE_UNMAPPED_NODE)) {
                vport->phba->nport_event_cnt++;
                lpfc_unregister_remote_port(ndlp);
        }

        if (new_state == NLP_STE_MAPPED_NODE ||
            new_state == NLP_STE_UNMAPPED_NODE) {
                vport->phba->nport_event_cnt++;
                /*
                 * Tell the fc transport about the port, if we haven't
                 * already. If we have, and it's a scsi entity, be
                 * sure to unblock any attached scsi devices
                 */
                lpfc_register_remote_port(vport, ndlp);
        }
        /*
         * if we added to Mapped list, but the remote port
         * registration failed or assigned a target id outside
         * our presentable range - move the node to the
         * Unmapped List
         */
        if (new_state == NLP_STE_MAPPED_NODE &&
            (!ndlp->rport ||
             ndlp->rport->scsi_target_id == -1 ||
             ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
                spin_unlock_irq(shost->host_lock);
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
        }
}
static char *
lpfc_nlp_state_name(char *buffer, size_t size, int state)
{
        static char *states[] = {
                [NLP_STE_UNUSED_NODE] = "UNUSED",
                [NLP_STE_PLOGI_ISSUE] = "PLOGI",
                [NLP_STE_ADISC_ISSUE] = "ADISC",
                [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
                [NLP_STE_PRLI_ISSUE] = "PRLI",
                [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
                [NLP_STE_MAPPED_NODE] = "MAPPED",
                [NLP_STE_NPR_NODE] = "NPR",
        };

        if (state < NLP_STE_MAX_STATE && states[state])
                strlcpy(buffer, states[state], size);
        else
                snprintf(buffer, size, "unknown (%d)", state);
        return buffer;
}
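
/*
 * Move an ndlp to a new discovery state, keeping the per-state counters
 * and the vport node list consistent, then apply transport side effects.
 */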
void
lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                   int state)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        int old_state = ndlp->nlp_state;
        char name1[16], name2[16];

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0904 NPort state transition x%06x, %s -> %s\n",
                         ndlp->nlp_DID,
                         lpfc_nlp_state_name(name1, sizeof(name1), old_state),
                         lpfc_nlp_state_name(name2, sizeof(name2), state));

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
                "node statechg    did:x%x old:%d ste:%d",
                ndlp->nlp_DID, old_state, state);

        if (old_state == NLP_STE_NPR_NODE &&
            (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
            state != NLP_STE_NPR_NODE)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (old_state == NLP_STE_UNMAPPED_NODE) {
                ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
                ndlp->nlp_type &= ~NLP_FC_NODE;
        }

        if (list_empty(&ndlp->nlp_listp)) {
                spin_lock_irq(shost->host_lock);
                list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
                spin_unlock_irq(shost->host_lock);
        } else if (old_state)
                lpfc_nlp_counters(vport, old_state, -1);

        ndlp->nlp_state = state;
        lpfc_nlp_counters(vport, state, 1);
        lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
}
void
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

        if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
                lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
        spin_lock_irq(shost->host_lock);
        list_del_init(&ndlp->nlp_listp);
        spin_unlock_irq(shost->host_lock);
        lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
                               NLP_STE_UNUSED_NODE);
}
void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        /*
         * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
         * be used if we wish to issue the "last" lpfc_nlp_put() to remove
         * the ndlp from the vport. The ndlp resides on the UNUSED list
         * until ALL other outstanding threads have completed. Thus, if an
         * ndlp is on the UNUSED list already, we should never do another
         * lpfc_drop_node() on it.
         */
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
        lpfc_nlp_put(ndlp);
        return;
}
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        uint32_t tmo;

        if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
                /* For FAN, timeout should be greater than edtov */
                tmo = (((phba->fc_edtov + 999) / 1000) + 1);
        } else {
                /* Normal discovery timeout should be > than ELS/CT timeout
                 * FC spec states we need 3 * ratov for CT requests
                 */
                tmo = ((phba->fc_ratov * 3) + 3);
        }

        if (!timer_pending(&vport->fc_disctmo)) {
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                        "set disc timer:  tmo:x%x state:x%x flg:x%x",
                        tmo, vport->port_state, vport->fc_flag);
        }

        mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
        spin_lock_irq(shost->host_lock);
        vport->fc_flag |= FC_DISC_TMO;
        spin_unlock_irq(shost->host_lock);

        /* Start Discovery Timer state <hba_state> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0247 Start Discovery Timer state x%x "
                         "Data: x%x x%lx x%x x%x\n",
                         vport->port_state, tmo,
                         (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
                         vport->fc_adisc_cnt);

        return;
}
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        unsigned long iflags;

        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "can disc timer:  state:x%x rtry:x%x flg:x%x",
                vport->port_state, vport->fc_ns_retry, vport->fc_flag);

        /* Turn off discovery timer if it's running */
        if (vport->fc_flag & FC_DISC_TMO) {
                spin_lock_irqsave(shost->host_lock, iflags);
                vport->fc_flag &= ~FC_DISC_TMO;
                spin_unlock_irqrestore(shost->host_lock, iflags);
                del_timer_sync(&vport->fc_disctmo);
                spin_lock_irqsave(&vport->work_port_lock, iflags);
                vport->work_port_events &= ~WORKER_DISC_TMO;
                spin_unlock_irqrestore(&vport->work_port_lock, iflags);
        }

        /* Cancel Discovery Timer state <hba_state> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
                         "0248 Cancel Discovery Timer state x%x "
                         "Data: x%x x%x x%x\n",
                         vport->port_state, vport->fc_flag,
                         vport->fc_plogi_cnt, vport->fc_adisc_cnt);

        return 0;
}
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
                    struct lpfc_sli_ring *pring,
                    struct lpfc_iocbq *iocb,
                    struct lpfc_nodelist *ndlp)
{
        struct lpfc_sli *psli = &phba->sli;
        IOCB_t *icmd = &iocb->iocb;
        struct lpfc_vport *vport = ndlp->vport;

        if (iocb->vport != vport)
                return 0;

        if (pring->ringno == LPFC_ELS_RING) {
                switch (icmd->ulpCommand) {
                case CMD_GEN_REQUEST64_CR:
                        if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
                                return 1;
                case CMD_ELS_REQUEST64_CR:
                        if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
                                return 1;
                case CMD_XMIT_ELS_RSP64_CX:
                        if (iocb->context1 == (uint8_t *) ndlp)
                                return 1;
                }
        } else if (pring->ringno == psli->extra_ring) {

        } else if (pring->ringno == psli->fcp_ring) {
                /* Skip match check if waiting to relogin to FCP target */
                if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
                    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
                        return 0;
                }
                if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
                        return 1;
                }
        } else if (pring->ringno == psli->next_ring) {

        }
        return 0;
}
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
        LIST_HEAD(completions);
        struct lpfc_sli *psli;
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *iocb, *next_iocb;
        IOCB_t *icmd;
        uint32_t rpi, i;

        lpfc_fabric_abort_nport(ndlp);

        /*
         * Everything that matches on txcmplq will be returned
         * by firmware with a no rpi error.
         */
        psli = &phba->sli;
        rpi = ndlp->nlp_rpi;
        if (rpi) {
                /* Now process each ring */
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->ring[i];

                        spin_lock_irq(&phba->hbalock);
                        list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
                                                 list) {
                                /*
                                 * Check to see if iocb matches the nport we are
                                 * looking for
                                 */
                                if ((lpfc_check_sli_ndlp(phba, pring, iocb,
                                                         ndlp))) {
                                        /* It matches, so deque and call compl
                                           with an error */
                                        list_move_tail(&iocb->list,
                                                       &completions);
                                        pring->txq_cnt--;
                                }
                        }
                        spin_unlock_irq(&phba->hbalock);
                }
        }

        while (!list_empty(&completions)) {
                iocb = list_get_first(&completions, struct lpfc_iocbq, list);
                list_del_init(&iocb->list);

                if (!iocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, iocb);
                else {
                        icmd = &iocb->iocb;
                        icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        (iocb->iocb_cmpl)(phba, iocb, iocb);
                }
        }

        return 0;
}
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t    *mbox;
        int rc;

        if (ndlp->nlp_rpi) {
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mbox) {
                        lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
                        mbox->vport = vport;
                        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                        if (rc == MBX_NOT_FINISHED)
                                mempool_free(mbox, phba->mbox_mem_pool);
                }
                lpfc_no_rpi(phba, ndlp);
                ndlp->nlp_rpi = 0;
                return 1;
        }
        return 0;
}
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t    *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
                lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(mbox, phba->mbox_mem_pool);
                }
        }
}
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t    *mbox;
        int rc;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
                lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
                mbox->vport = vport;
                mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
                                         "1815 Could not issue "
                                         "unreg_did (default rpis)\n");
                        mempool_free(mbox, phba->mbox_mem_pool);
                }
        }
}
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
        LPFC_MBOXQ_t *mb, *nextmb;
        struct lpfc_dmabuf *mp;

        /* Cleanup node for NPort <nlp_DID> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
                         "0900 Cleanup node for NPort x%x "
                         "Data: x%x x%x x%x\n",
                         ndlp->nlp_DID, ndlp->nlp_flag,
                         ndlp->nlp_state, ndlp->nlp_rpi);
        lpfc_dequeue_node(vport, ndlp);

        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
                if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mb->context2 = NULL;
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                }
        }

        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
                if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
                                __lpfc_mbuf_free(phba, mp->virt, mp->phys);
                                kfree(mp);
                        }
                        list_del(&mb->list);
                        mempool_free(mb, phba->mbox_mem_pool);
                }
        }
        spin_unlock_irq(&phba->hbalock);

        lpfc_els_abort(phba, ndlp);
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~NLP_DELAY_TMO;
        spin_unlock_irq(shost->host_lock);

        ndlp->nlp_last_elscmd = 0;
        del_timer_sync(&ndlp->nlp_delayfunc);

        if (!list_empty(&ndlp->els_retry_evt.evt_listp))
                list_del_init(&ndlp->els_retry_evt.evt_listp);
        if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
                list_del_init(&ndlp->dev_loss_evt.evt_listp);

        lpfc_unreg_rpi(vport, ndlp);

        return 0;
}
/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_rport_data *rdata;

        if (ndlp->nlp_flag & NLP_DELAY_TMO) {
                lpfc_cancel_retry_delay_tmo(vport, ndlp);
        }

        lpfc_cleanup_node(vport, ndlp);

        /*
         * We can get here with a non-NULL ndlp->rport because when we
         * unregister a rport we don't break the rport/node linkage.  So if we
         * do, make sure we don't leave any dangling pointers behind.
         */
        if (ndlp->rport) {
                rdata = ndlp->rport->dd_data;
                rdata->pnode = NULL;
                ndlp->rport = NULL;
        }
}
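
/*
 * Match an ndlp against a DID. Beyond the direct match, this tolerates
 * loop devices whose area/domain was zero before the fabric assigned
 * the full address.
 */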
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
              uint32_t did)
{
        D_ID mydid, ndlpdid, matchdid;

        if (did == Bcast_DID)
                return 0;

        if (ndlp->nlp_DID == 0) {
                return 0;
        }

        /* First check for Direct match */
        if (ndlp->nlp_DID == did)
                return 1;

        /* Next check for area/domain identically equals 0 match */
        mydid.un.word = vport->fc_myDID;
        if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
                return 0;
        }

        matchdid.un.word = did;
        ndlpdid.un.word = ndlp->nlp_DID;
        if (matchdid.un.b.id == ndlpdid.un.b.id) {
                if ((mydid.un.b.domain == matchdid.un.b.domain) &&
                    (mydid.un.b.area == matchdid.un.b.area)) {
                        if ((ndlpdid.un.b.domain == 0) &&
                            (ndlpdid.un.b.area == 0)) {
                                if (ndlpdid.un.b.id)
                                        return 1;
                        }
                        return 0;
                }

                matchdid.un.word = ndlp->nlp_DID;
                if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
                    (mydid.un.b.area == ndlpdid.un.b.area)) {
                        if ((matchdid.un.b.domain == 0) &&
                            (matchdid.un.b.area == 0)) {
                                if (matchdid.un.b.id)
                                        return 1;
                        }
                }
        }
        return 0;
}
2038 static struct lpfc_nodelist *
2039 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
2041 struct lpfc_nodelist *ndlp;
2044 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2045 if (lpfc_matchdid(vport, ndlp, did)) {
2046 data1 = (((uint32_t) ndlp->nlp_state << 24) |
2047 ((uint32_t) ndlp->nlp_xri << 16) |
2048 ((uint32_t) ndlp->nlp_type << 8) |
2049 ((uint32_t) ndlp->nlp_rpi & 0xff));
2050 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2051 "0929 FIND node DID "
2052 "Data: x%p x%x x%x x%x\n",
2053 ndlp, ndlp->nlp_DID,
2054 ndlp->nlp_flag, data1);
2059 /* FIND node did <did> NOT FOUND */
2060 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
2061 "0932 FIND node did x%x NOT FOUND.\n", did);
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        spin_lock_irq(shost->host_lock);
        ndlp = __lpfc_findnode_did(vport, did);
        spin_unlock_irq(shost->host_lock);
        return ndlp;
}
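
/*
 * Find or allocate the node for a DID we intend to discover and mark it
 * NLP_NPR_2B_DISC; while in RSCN mode, only DIDs named in the RSCN
 * payload qualify.
 */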
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp;

        ndlp = lpfc_findnode_did(vport, did);
        if (!ndlp) {
                if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
                    lpfc_rscn_payload_check(vport, did) == 0)
                        return NULL;
                ndlp = (struct lpfc_nodelist *)
                     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
                if (!ndlp)
                        return NULL;
                lpfc_nlp_init(vport, ndlp, did);
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
                return ndlp;
        }
        if (vport->fc_flag & FC_RSCN_MODE) {
                if (lpfc_rscn_payload_check(vport, did)) {
                        /* If we've already received a PLOGI from this NPort
                         * we don't need to try to discover it again.
                         */
                        if (ndlp->nlp_flag & NLP_RCV_PLOGI)
                                return NULL;

                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                        spin_unlock_irq(shost->host_lock);

                        /* Since this node is marked for discovery,
                         * delay timeout is not needed.
                         */
                        if (ndlp->nlp_flag & NLP_DELAY_TMO)
                                lpfc_cancel_retry_delay_tmo(vport, ndlp);
                } else
                        ndlp = NULL;
        } else {
                /* If we've already received a PLOGI from this NPort,
                 * or we are already in the process of discovery on it,
                 * we don't need to try to discover it again.
                 */
                if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
                    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
                    ndlp->nlp_flag & NLP_RCV_PLOGI)
                        return NULL;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag |= NLP_NPR_2B_DISC;
                spin_unlock_irq(shost->host_lock);
        }
        return ndlp;
}
/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        int j;
        uint32_t alpa, index;

        if (!lpfc_is_link_up(phba))
                return;

        if (phba->fc_topology != TOPOLOGY_LOOP)
                return;

        /* Check for loop map present or not */
        if (phba->alpa_map[0]) {
                for (j = 1; j <= phba->alpa_map[0]; j++) {
                        alpa = phba->alpa_map[j];
                        if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
                                continue;
                        lpfc_setup_disc_node(vport, alpa);
                }
        } else {
                /* No alpamap, so try all alpa's */
                for (j = 0; j < FC_MAXLOOP; j++) {
                        /* If cfg_scan_down is set, start from highest
                         * ALPA (0xef) to lowest (0x1).
                         */
                        if (vport->cfg_scan_down)
                                index = j;
                        else
                                index = FC_MAXLOOP - j - 1;
                        alpa = lpfcAlpaArray[index];
                        if ((vport->fc_myDID & 0xff) == alpa)
                                continue;
                        lpfc_setup_disc_node(vport, alpa);
                }
        }
        return;
}
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *mbox;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
        struct lpfc_sli_ring *fcp_ring   = &psli->ring[psli->fcp_ring];
        struct lpfc_sli_ring *next_ring  = &psli->ring[psli->next_ring];
        int rc;

        /*
         * if it's not a physical port or if we already sent
         * clear_la then don't send it.
         */
        if ((phba->link_state >= LPFC_CLEAR_LA) ||
            (vport->port_type != LPFC_PHYSICAL_PORT))
                return;

        /* Link up discovery */
        if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
                phba->link_state = LPFC_CLEAR_LA;
                lpfc_clear_la(phba, mbox);
                mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
                mbox->vport = vport;
                rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
                if (rc == MBX_NOT_FINISHED) {
                        mempool_free(mbox, phba->mbox_mem_pool);
                        lpfc_disc_flush_list(vport);
                        extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
                        phba->link_state = LPFC_HBA_ERROR;
                }
        }
}
/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *regvpimbox;

        regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (regvpimbox) {
                lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
                regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
                regvpimbox->vport = vport;
                if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
                    == MBX_NOT_FINISHED) {
                        mempool_free(regvpimbox, phba->mbox_mem_pool);
                }
        }
}
/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_hba *phba = vport->phba;
    uint32_t num_sent;
    uint32_t clear_la_pending;
    int did_changed;

    if (!lpfc_is_link_up(phba))
        return;

    if (phba->link_state == LPFC_CLEAR_LA)
        clear_la_pending = 1;
    else
        clear_la_pending = 0;

    if (vport->port_state < LPFC_VPORT_READY)
        vport->port_state = LPFC_DISC_AUTH;

    lpfc_set_disctmo(vport);

    if (vport->fc_prevDID == vport->fc_myDID)
        did_changed = 0;
    else
        did_changed = 1;

    vport->fc_prevDID = vport->fc_myDID;
    vport->num_disc_nodes = 0;

    /* Start Discovery state <hba_state> */
    lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
             "0202 Start Discovery hba state x%x "
             "Data: x%x x%x x%x\n",
             vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
             vport->fc_adisc_cnt);

    /* First do ADISCs - if any */
    num_sent = lpfc_els_disc_adisc(vport);
    if (num_sent)
        return;

    /*
     * For SLI3, cmpl_reg_vpi will set port_state to READY, and
     * continue discovery.
     */
    if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
        !(vport->fc_flag & FC_RSCN_MODE)) {
        lpfc_issue_reg_vpi(phba, vport);
        return;
    }

    /*
     * For SLI2, we need to set port_state to READY and continue
     * discovery.
     */
    if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
        /* If we get here, there is nothing to ADISC */
        if (vport->port_type == LPFC_PHYSICAL_PORT)
            lpfc_issue_clear_la(phba, vport);

        if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
            vport->num_disc_nodes = 0;
            /* go thru NPR nodes and issue ELS PLOGIs */
            if (vport->fc_npr_cnt)
                lpfc_els_disc_plogi(vport);

            if (!vport->num_disc_nodes) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~FC_NDISC_ACTIVE;
                spin_unlock_irq(shost->host_lock);
                lpfc_can_disctmo(vport);
            }
        }
        vport->port_state = LPFC_VPORT_READY;
    } else {
        /* Next do PLOGIs - if any */
        num_sent = lpfc_els_disc_plogi(vport);
        if (num_sent)
            return;

        if (vport->fc_flag & FC_RSCN_MODE) {
            /* Check to see if more RSCNs came in while we
             * were processing this one.
             */
            if ((vport->fc_rscn_id_cnt == 0) &&
                (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
                spin_lock_irq(shost->host_lock);
                vport->fc_flag &= ~FC_RSCN_MODE;
                spin_unlock_irq(shost->host_lock);
                lpfc_can_disctmo(vport);
            } else
                lpfc_els_handle_rscn(vport);
        }
    }
    return;
}
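/*
 * Flow sketch: the discovery timeout handler below drives this same pair of
 * routines when the initial FLOGI never completes:
 *
 *	lpfc_disc_list_loopmap(vport);	// seed NPR nodes from the loop map
 *	lpfc_disc_start(vport);		// ADISC pass first, then PLOGI pass
 */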
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
    LIST_HEAD(completions);
    struct lpfc_sli *psli;
    IOCB_t *icmd;
    struct lpfc_iocbq *iocb, *next_iocb;
    struct lpfc_sli_ring *pring;

    psli = &phba->sli;
    pring = &psli->ring[LPFC_ELS_RING];

    /* Error matching iocb on txq or txcmplq
     * First check the txq.
     */
    spin_lock_irq(&phba->hbalock);
    list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
        if (iocb->context1 != ndlp)
            continue;
        icmd = &iocb->iocb;
        if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
            (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
            list_move_tail(&iocb->list, &completions);
            pring->txq_cnt--;
        }
    }

    /* Next check the txcmplq */
    list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
        if (iocb->context1 != ndlp)
            continue;
        icmd = &iocb->iocb;
        if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
            icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)
            lpfc_sli_issue_abort_iotag(phba, pring, iocb);
    }
    spin_unlock_irq(&phba->hbalock);

    while (!list_empty(&completions)) {
        iocb = list_get_first(&completions, struct lpfc_iocbq, list);
        list_del_init(&iocb->list);

        if (!iocb->iocb_cmpl)
            lpfc_sli_release_iocbq(phba, iocb);
        else {
            icmd = &iocb->iocb;
            icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
            icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
            (iocb->iocb_cmpl) (phba, iocb, iocb);
        }
    }
}
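/*
 * Why the two queues are treated differently (summary, not new behavior):
 * txq entries were never handed to the firmware, so they can be moved to a
 * local list and completed directly with IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_ABORTED; txcmplq entries are in flight, so they must instead be
 * aborted through lpfc_sli_issue_abort_iotag() and complete via the normal
 * response path.
 */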
void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
    struct lpfc_nodelist *ndlp, *next_ndlp;
    struct lpfc_hba *phba = vport->phba;

    if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
                     nlp_listp) {
            if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
                ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
                lpfc_free_tx(phba, ndlp);
            }
        }
    }
}
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
    lpfc_els_flush_rscn(vport);
    lpfc_els_flush_cmd(vport);
    lpfc_disc_flush_list(vport);
}
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
    struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
    struct lpfc_hba *phba = vport->phba;
    unsigned long flags = 0;

    if (unlikely(!phba))
        return;

    if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
        spin_lock_irqsave(&vport->work_port_lock, flags);
        vport->work_port_events |= WORKER_DISC_TMO;
        spin_unlock_irqrestore(&vport->work_port_lock, flags);

        spin_lock_irqsave(&phba->hbalock, flags);
        if (phba->work_wait)
            lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, flags);
    }
    return;
}
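/*
 * Arming sketch (illustrative; the actual timer setup lives elsewhere in
 * the driver, so the field and variable names here are assumptions):
 * lpfc_disc_timeout is a timer_list callback, so it runs in (soft)irq
 * context and only flags work for the worker thread rather than doing
 * discovery itself:
 *
 *	init_timer(&vport->fc_disctmo);
 *	vport->fc_disctmo.function = lpfc_disc_timeout;
 *	vport->fc_disctmo.data = (unsigned long) vport;
 *	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);	// tmo in seconds
 */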
static void
lpfc_disc_timeout_handler(struct lpfc_vport *vport)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_sli *psli = &phba->sli;
    struct lpfc_nodelist *ndlp, *next_ndlp;
    LPFC_MBOXQ_t *initlinkmbox;
    int rc, clrlaerr = 0;

    if (!(vport->fc_flag & FC_DISC_TMO))
        return;

    spin_lock_irq(shost->host_lock);
    vport->fc_flag &= ~FC_DISC_TMO;
    spin_unlock_irq(shost->host_lock);

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
        "disc timeout: state:x%x rtry:x%x flg:x%x",
        vport->port_state, vport->fc_ns_retry, vport->fc_flag);
    switch (vport->port_state) {

    case LPFC_LOCAL_CFG_LINK:
        /* port_state is identically LPFC_LOCAL_CFG_LINK while
         * waiting for FAN.
         */
        lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
                 "0221 FAN timeout\n");
        /* Start discovery by sending FLOGI, clean up old rpis */
        list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
                     nlp_listp) {
            if (ndlp->nlp_state != NLP_STE_NPR_NODE)
                continue;
            if (ndlp->nlp_type & NLP_FABRIC) {
                /* Clean up the ndlp on Fabric connections */
                lpfc_drop_node(vport, ndlp);
            } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
                /* Fail outstanding IO now since device
                 * is marked for PLOGI.
                 */
                lpfc_unreg_rpi(vport, ndlp);
            }
        }
        if (vport->port_state != LPFC_FLOGI) {
            lpfc_initial_flogi(vport);
        }
        break;
    case LPFC_FDISC:
    case LPFC_FLOGI:
        /* port_state is identically LPFC_FLOGI while waiting for
         * FLOGI cmpl.
         */
        /* Initial FLOGI timeout */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                 "0222 Initial %s timeout\n",
                 vport->vpi ? "FDISC" : "FLOGI");

        /* Assume no Fabric and go on with discovery.
         * Check for outstanding ELS FLOGI to abort.
         */

        /* FLOGI failed, so just use loop map to make discovery list */
        lpfc_disc_list_loopmap(vport);

        /* Start discovery */
        lpfc_disc_start(vport);
        break;
    case LPFC_FABRIC_CFG_LINK:
        /* hba_state is identically LPFC_FABRIC_CFG_LINK while
         * waiting for NameServer login.
         */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                 "0223 Timeout while waiting for "
                 "NameServer login\n");
        /* Next look for NameServer ndlp */
        ndlp = lpfc_findnode_did(vport, NameServer_DID);
        if (ndlp)
            lpfc_els_abort(phba, ndlp);

        /* ReStart discovery */
        goto restart_disc;
    case LPFC_NS_QRY:
        /* Check for wait for NameServer Rsp timeout */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                 "0224 NameServer Query timeout "
                 "Data: x%x x%x\n",
                 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

        if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
            /* Try it one more time */
            vport->fc_ns_retry++;
            rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
                     vport->fc_ns_retry, 0);
            if (rc == 0)
                break;
        }
        vport->fc_ns_retry = 0;
restart_disc:
        /*
         * Discovery is over.
         * set port_state to PORT_READY if SLI2.
         * cmpl_reg_vpi will set port_state to READY for SLI3.
         */
        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
            lpfc_issue_reg_vpi(phba, vport);
        else {  /* NPIV Not enabled */
            lpfc_issue_clear_la(phba, vport);
            vport->port_state = LPFC_VPORT_READY;
        }

        /* Setup and issue mailbox INITIALIZE LINK command */
        initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!initlinkmbox) {
            lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                     "0206 Device Discovery "
                     "completion error\n");
            phba->link_state = LPFC_HBA_ERROR;
            break;
        }

        lpfc_linkdown(phba);
        lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
                   phba->cfg_link_speed);
        initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
        initlinkmbox->vport = vport;
        initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
        lpfc_set_loopback_flag(phba);
        if (rc == MBX_NOT_FINISHED)
            mempool_free(initlinkmbox, phba->mbox_mem_pool);

        break;
    case LPFC_DISC_AUTH:
        /* Node Authentication timeout */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                 "0227 Node Authentication timeout\n");
        lpfc_disc_flush_list(vport);

        /*
         * set port_state to PORT_READY if SLI2.
         * cmpl_reg_vpi will set port_state to READY for SLI3.
         */
        if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
            lpfc_issue_reg_vpi(phba, vport);
        else {  /* NPIV Not enabled */
            lpfc_issue_clear_la(phba, vport);
            vport->port_state = LPFC_VPORT_READY;
        }
        break;
    case LPFC_VPORT_READY:
        if (vport->fc_flag & FC_RSCN_MODE) {
            lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                     "0231 RSCN timeout Data: x%x "
                     "x%x\n",
                     vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

            /* Cleanup any outstanding ELS commands */
            lpfc_els_flush_cmd(vport);

            lpfc_els_flush_rscn(vport);
            lpfc_disc_flush_list(vport);
        }
        break;
    default:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                 "0229 Unexpected discovery timeout, "
                 "vport State x%x\n", vport->port_state);
        break;
    }
    switch (phba->link_state) {
    case LPFC_CLEAR_LA:
        /* CLEAR LA timeout */
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                 "0228 CLEAR LA timeout\n");
        clrlaerr = 1;
        break;

    case LPFC_LINK_UNKNOWN:
    case LPFC_WARM_START:
    case LPFC_INIT_START:
    case LPFC_INIT_MBX_CMDS:
    case LPFC_LINK_DOWN:
    case LPFC_LINK_UP:
    case LPFC_HBA_ERROR:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                 "0230 Unexpected timeout, hba link "
                 "state x%x\n", phba->link_state);
        clrlaerr = 1;
        break;

    case LPFC_HBA_READY:
        break;
    }

    if (clrlaerr) {
        lpfc_disc_flush_list(vport);
        psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
        psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
        vport->port_state = LPFC_VPORT_READY;
    }

    return;
}
/*
 * This routine handles processing the FDMI REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
    MAILBOX_t *mb = &pmb->mb;
    struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
    struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
    struct lpfc_vport *vport = pmb->vport;

    pmb->context1 = NULL;

    ndlp->nlp_rpi = mb->un.varWords[0];
    ndlp->nlp_type |= NLP_FABRIC;
    lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

    /*
     * Start issuing Fabric-Device Management Interface (FDMI) commands
     * to 0xfffffa (the FDMI well-known port), or delay issuing the FDMI
     * command if fdmi-on=2 (supporting RPA/hostname).
     */
    if (vport->cfg_fdmi_on == 1)
        lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
    else
        mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

    /* Mailbox took a reference to the node */
    lpfc_nlp_put(ndlp);
    lpfc_mbuf_free(phba, mp->virt, mp->phys);
    kfree(mp);
    mempool_free(pmb, phba->mbox_mem_pool);

    return;
}
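/*
 * Config note (illustrative summary of the branch above): cfg_fdmi_on == 1
 * sends SLI_MGMT_DHBA to the FDMI well-known port immediately, while the
 * RPA/hostname-supporting mode instead defers FDMI by arming fc_fdmitmo
 * for one minute.
 */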
static int
lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
{
    uint16_t *rpi = param;
    return ndlp->nlp_rpi == *rpi;
}

static int
lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
{
    return memcmp(&ndlp->nlp_portname, param,
              sizeof(ndlp->nlp_portname)) == 0;
}

struct lpfc_nodelist *
__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
    struct lpfc_nodelist *ndlp;

    list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
        if (filter(ndlp, param))
            return ndlp;
    }
    return NULL;
}
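/*
 * Filter sketch (hypothetical, illustration only): node_filter is simply
 * int (*)(struct lpfc_nodelist *, void *), so one-off lookups can be built
 * exactly like lpfc_filter_by_rpi above, e.g. matching on nlp_DID:
 *
 *	static int lpfc_filter_by_did(struct lpfc_nodelist *ndlp, void *param)
 *	{
 *		uint32_t *did = param;		// hypothetical helper
 *		return ndlp->nlp_DID == *did;
 *	}
 *
 *	ndlp = __lpfc_find_node(vport, lpfc_filter_by_did, &did);
 */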
/*
 * Search the node list for a remote port matching the filter criteria.
 * This routine takes host_lock itself; callers already holding host_lock
 * should use __lpfc_find_node() instead.
 */
struct lpfc_nodelist *
lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_nodelist *ndlp;

    spin_lock_irq(shost->host_lock);
    ndlp = __lpfc_find_node(vport, filter, param);
    spin_unlock_irq(shost->host_lock);
    return ndlp;
}
/*
 * This routine looks up the node list for the given RPI. If the RPI is
 * found, it returns a pointer to the node list element; otherwise it
 * returns NULL.
 */
struct lpfc_nodelist *
__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
    return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
}

struct lpfc_nodelist *
lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_nodelist *ndlp;

    spin_lock_irq(shost->host_lock);
    ndlp = __lpfc_findnode_rpi(vport, rpi);
    spin_unlock_irq(shost->host_lock);
    return ndlp;
}
/*
 * This routine looks up the node list for the given WWPN. If the WWPN is
 * found, it returns a pointer to the node list element; otherwise it
 * returns NULL.
 */
struct lpfc_nodelist *
lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
{
    struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
    struct lpfc_nodelist *ndlp;

    spin_lock_irq(shost->host_lock);
    ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
    spin_unlock_irq(shost->host_lock);
    return ndlp;
}
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
          uint32_t did)
{
    memset(ndlp, 0, sizeof (struct lpfc_nodelist));
    INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
    INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
    init_timer(&ndlp->nlp_delayfunc);
    ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
    ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
    ndlp->nlp_DID = did;
    ndlp->vport = vport;
    ndlp->nlp_sid = NLP_NO_SID;
    INIT_LIST_HEAD(&ndlp->nlp_listp);
    kref_init(&ndlp->kref);

    lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
        "node init: did:x%x",
        ndlp->nlp_DID, 0, 0);

    return;
}
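/*
 * Pairing sketch: lpfc_nlp_init() only initializes; allocation comes from
 * the nlp mempool, as in lpfc_setup_disc_node() earlier in this file:
 *
 *	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
 *	if (ndlp) {
 *		lpfc_nlp_init(vport, ndlp, did);
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 *	}
 */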
/* This routine releases all resources associated with a specific NPort's
 * ndlp and mempool_free's the nodelist.
 */
static void
lpfc_nlp_release(struct kref *kref)
{
    struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
                          kref);

    lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
        "node release: did:x%x flg:x%x type:x%x",
        ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

    lpfc_nlp_remove(ndlp->vport, ndlp);
    mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
}
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery
 * thread is using it.
 */
struct lpfc_nodelist *
lpfc_nlp_get(struct lpfc_nodelist *ndlp)
{
    if (ndlp) {
        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
            "node get: did:x%x flg:x%x refcnt:x%x",
            ndlp->nlp_DID, ndlp->nlp_flag,
            atomic_read(&ndlp->kref.refcount));
        kref_get(&ndlp->kref);
    }
    return ndlp;
}
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates that the associated nodelist should be
 * freed.
 */
int
lpfc_nlp_put(struct lpfc_nodelist *ndlp)
{
    if (ndlp)
        lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
            "node put: did:x%x flg:x%x refcnt:x%x",
            ndlp->nlp_DID, ndlp->nlp_flag,
            atomic_read(&ndlp->kref.refcount));

    return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
}
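/*
 * Lifecycle sketch: kref_init() in lpfc_nlp_init() starts the refcount at 1.
 * Each additional user pairs lpfc_nlp_get() with lpfc_nlp_put(); the final
 * put invokes lpfc_nlp_release(), which returns the ndlp to the mempool:
 *
 *	if (lpfc_nlp_get(ndlp)) {	// +1, NULL-safe
 *		... use ndlp ...
 *		lpfc_nlp_put(ndlp);	// -1, may free via lpfc_nlp_release
 *	}
 */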
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. This routine returns 1 if the ndlp
 * is not being used by anyone and has been freed. A return value of
 * 0 indicates it is being used by another discovery thread and the
 * refcount is left unchanged.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
    lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
        "node not used: did:x%x flg:x%x refcnt:x%x",
        ndlp->nlp_DID, ndlp->nlp_flag,
        atomic_read(&ndlp->kref.refcount));

    if (atomic_read(&ndlp->kref.refcount) == 1) {