/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
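
/* Wait intervals used by the SCSI reset/abort paths (values in seconds). */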
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/**
 * lpfc_update_stats: Update statistical data for the command completion.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
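
	/*
	 * Pick a latency histogram bucket: with LPFC_LINEAR_BUCKET each
	 * bucket is bucket_step ms wide; otherwise bucket widths grow as
	 * powers of two of bucket_step above bucket_base.
	 */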
	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change event.
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event each second, and wakes up the worker thread of @phba to process
 * the event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;
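
	/* Post at most one ramp-down event per QUEUE_RAMP_DOWN_INTERVAL. */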
	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
 * @vport: The virtual port for which this call is being executed.
 * @sdev: The scsi device whose queue depth may be ramped up.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the hba of @vport,
 * at most one event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time, and wakes up the worker thread of the hba to
 * process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;

	atomic_inc(&phba->num_cmd_success);
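
	/*
	 * Nothing to ramp up if the device already runs at the configured
	 * LUN queue depth.
	 */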
	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called by the worker thread to process the
 * WORKER_RAMP_DOWN_QUEUE event. It reduces the queue depth of every scsi
 * device on each vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth, old_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;
	struct lpfc_rport_data *rdata;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);
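
	/*
	 * Scale each queue depth down in proportion to the share of recent
	 * completions that failed with a resource error.
	 */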
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth =
						sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
				old_queue_depth = sdev->queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun, old_queue_depth,
						new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called by the worker thread to process the
 * WORKER_RAMP_UP_QUEUE event. It increases the queue depth of every scsi
 * device on each vport associated with @phba by 1, and resets the @phba
 * num_rsrc_err and num_cmd_success counters to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;
	struct lpfc_rport_data *rdata;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun,
						sdev->queue_depth - 1,
						sdev->queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block: set all scsi hosts to block state.
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() on every remote port. It is invoked
 * by EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
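	/*
	 * Deleting each remote port moves it to the blocked state, so the
	 * transport stops issuing new I/O to it.
	 */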
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf: Scsi buffer allocator.
 * @vport: The virtual port for which this call is being executed.
 *
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
 * contains information to build the IOCB. The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf data structure - Success
 **/
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
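
	/*
	 * Carve the single DMA allocation into its three regions:
	 * [struct fcp_cmnd][struct fcp_rsp][BPL entries for the sg list].
	 */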
	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys_fcp_cmd = psb->dma_handle;
	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
		sizeof(struct fcp_rsp);

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
	 * list bdes. Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

	/* Setup the physical region for the FCP RSP */
	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	if (phba->sli_rev == 3) {
		/* fill in immediate fcp command BDE */
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
						       unsli3.fcp_ext.icd);
		iocb->un.fcpi64.bdl.addrHigh = 0;
		iocb->ulpBdeCount = 0;
		iocb->ulpLe = 0;
		/* fill in response BDE */
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
			sizeof(struct fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrLow =
			putPaddrLow(pdma_phys_fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrHigh =
			putPaddrHigh(pdma_phys_fcp_rsp);
	} else {
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
	}
	iocb->ulpClass = CLASS3;

	return psb;
}

/**
 * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list of the Hba.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to the hba's
 * lpfc_scsi_buf_list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases the @psb scsi buffer by adding it to the tail of
 * the @phba lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping of the scatter-gather list attached
 * to the scsi command in @lpfc_cmd. It walks the sg elements, formats the
 * BDEs, and initializes all IOCB fields that depend on the scsi command
 * request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages. They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
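		/*
		 * A seg_cnt above the limit negotiated at probe time should
		 * not happen; log it and fail the I/O rather than overrun
		 * the BDE list.
		 */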
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg. Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command. Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
	return 0;
}

/**
 * lpfc_send_scsi_error_event: Posts an event when there is a SCSI error.
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * an error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		fcpi_parm &&
		((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
		((scsi_status == SAM_STAT_GOOD) &&
		!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does DMA un-mapping of the scatter gather list attached to
 * the scsi command in @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider. (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg. There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

/**
 * lpfc_handle_fcp_err: FCP response handler.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains the FCP error.
 *
 * This routine is called to process a response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
 * based upon the SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd. The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;
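
		/*
		 * Sense data in the FCP response follows rsplen bytes of
		 * response information, so skip past it before copying.
		 */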
		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if the under run reported by
		 * the storage array is the same as the under run reported by
		 * the HBA. If they differ, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command. Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns the scsi command result by looking at the response
 * IOCB status field. It also handles the QUEUE FULL condition by ramping
 * down the device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				      &phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	sdev = cmd->device;
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		       msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(sdev->host->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
			    atomic_read(&pnode->cmd_pending) &&
			    (atomic_read(&pnode->cmd_pending) >
			    LPFC_MIN_TGT_QDEPTH) &&
			    ((cmd->cmnd[0] == READ_10) ||
			    (cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
		    time_after(jiffies, pnode->last_change_time +
			       msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(sdev->host->host_lock, flags);
			pnode->cmd_qdepth += pnode->cmd_qdepth *
				LPFC_TGTQ_RAMPUP_PCENT / 100;
			if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
				pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(sdev->host->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(sdev->host->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);
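
	/*
	 * On success, also consider growing the LUN queue depth for this
	 * target, at most once per LPFC_Q_RAMP_UP_INTERVAL and only while
	 * below the configured lun_queue_depth.
	 */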
	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
	    ((jiffies - pnode->last_ramp_up_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
	     LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
			0xFFFFFFFF,
			sdev->queue_depth - 1, sdev->queue_depth);
	}

	/*
	 * Check for queue full. If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
						      tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
			lpfc_send_sdev_queuedepth_change_event(phba, vport,
				pnode, 0xFFFFFFFF,
				depth+1, depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_fcpcmd_to_iocb: copy the fcp_cmd data into the IOCB.
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes the fcp_cmnd and iocb data structures from the
 * scsi command attached to @lpfc_cmd.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
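
	/* Map the midlayer's tag message onto the FCP task attribute. */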
	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither. Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates an FCP information unit corresponding to
 * @task_mgmt_cmd.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	return 1;
}

/**
 * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset. It releases the scsi buffer associated with the command.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_scsi_tgt_reset: Target reset handler.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @vport: The virtual port for which this call is being executed.
 * @tgt_id: Target ID.
 * @lun: Lun number.
 * @rdata: Pointer to lpfc_rport_data.
 *
 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
 *
 * Return codes:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;
	int status;

	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;

	lpfc_cmd->rdata = rdata;
	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					      FCP_TARGET_RESET);
	if (!status)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

/**
 * lpfc_info: Info entry point of the scsi_host_template data structure.
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about the hba.
 *
 * Return codes:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer: Routine to modify the fcp_poll timer of the hba.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine re-arms the fcp_poll_timer of @phba to fire cfg_poll_tmo
 * milliseconds from now. The default value of cfg_poll_tmo is
 * 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
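
	/* Only re-arm while FCP commands remain outstanding on the ring. */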
	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer: Routine to start the fcp_poll_timer of the HBA.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba *phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout: Restart polling timer.
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/
void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand: Queuecommand entry point of the Scsi Host Template
 * data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * The driver registers this routine with the scsi midlayer to submit a
 * @cmnd for processing. It builds an IOCB from the scsi command and hands
 * it to the firmware. The @done callback is invoked after the driver has
 * finished processing the command.
 *
 * Return codes:
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
	if (vport->cfg_max_scsicmpl_time &&
	    (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
		goto out_host_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);
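
		/*
		 * Buffer pool exhaustion is a resource error: report host
		 * busy so the midlayer retries the command later.
		 */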
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

/**
 * lpfc_block_error_handler: Routine to block the error handler.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine blocks execution until the fc_rport leaves the
 * FC_PORTSTATE_BLOCKED state.
 **/
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}

/**
 * lpfc_abort_handler: eh_abort_handler entry point of the Scsi Host Template
 * data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts a @cmnd pending in the base driver.
 *
 * Return codes:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If the pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));
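
	/*
	 * lpfc_cmd->pCmd is cleared on completion; if it still points at
	 * our command, the abort did not complete within the wait.
	 */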
	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

/**
 * lpfc_device_reset_handler: eh_device_reset entry point of the Scsi Host
 * Template data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a TARGET_RESET task management
 * command.
 *
 * Return codes:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	unsigned long later;
	int ret = SUCCESS;
	int status;
	int cnt;
	struct lpfc_scsi_event_header scsi_event;

	lpfc_block_error_handler(cmnd);
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			break;
		pnode = rdata->pnode;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		LPFC_NL_VENDOR_ID);

	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0721 LUN Reset rport "
				 "failure: msec x%x rdata x%p\n",
				 jiffies_to_msecs(jiffies - later), rdata);
		return FAILED;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
					      cmnd->device->lun,
					      FCP_TARGET_RESET);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status == IOCB_TIMEDOUT) {
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		ret = TIMEOUT_ERROR;
	} else {
		if (status != IOCB_SUCCESS)
			ret = FAILED;
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, iocbqrsp);
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_TGT);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_TGT);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
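	/*
	 * Poll every 20ms, for up to twice the devloss timeout, until the
	 * aborted I/O has drained.
	 */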
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_TGT);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}
	return ret;
}

/**
 * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of the Scsi Host
 * Template data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset for every target on
 * @cmnd->device->host.
 *
 * Return codes:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = SUCCESS, status = SUCCESS, i;
	int cnt;
	struct lpfc_scsi_buf *lpfc_cmd;
	unsigned long later;
	struct lpfc_scsi_event_header scsi_event;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		LPFC_NL_VENDOR_ID);

	lpfc_block_error_handler(cmnd);
	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver. Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;
		lpfc_cmd = lpfc_get_scsi_buf(phba);
		if (lpfc_cmd) {
			lpfc_cmd->timeout = 60;
			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
						     cmnd->device->lun,
						     ndlp->rport->dd_data);
			if (status != TIMEOUT_ERROR)
				lpfc_release_scsi_buf(phba, lpfc_cmd);
		}
		if (!lpfc_cmd || status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets. Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc: slave_alloc entry point of the Scsi Host Template data
 * structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers, making sure no more scsi buffers
 * are allocated than the HBA limit conveyed to the midlayer. This list of
 * scsi bufs exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers. Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure. The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra. This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

/**
 * lpfc_slave_configure: slave_configure entry point of the Scsi Host Template
 * data structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 * - Tagged command queuing support for @sdev, if supported.
 * - Dev loss time out value of the fc_rport.
 * - SLI polling of the fcp ring, if the ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device. Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy: slave_destroy entry point of the SHT data structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to null.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};
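
/*
 * The vport template matches lpfc_template; the vport-specific sysfs
 * attributes (lpfc_vport_attrs) are the notable difference.
 */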
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};