/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
/*
 * This function is called with no lock held when there is a resource
 * error in the driver or in the firmware.
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
}
/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;

	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
}
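/**
 * lpfc_ramp_down_queue_handler: worker thread handler to ramp down queue depth.
 * @phba: Pointer to HBA context object.
 *
 * This routine runs when a WORKER_RAMP_DOWN_QUEUE event has been posted. It
 * lowers the queue depth of every scsi device on every vport by the ratio of
 * resource errors to total completions seen since the last adjustment, then
 * clears the error and success counters.
 **/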
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
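/**
 * lpfc_ramp_up_queue_handler: worker thread handler to ramp up queue depth.
 * @phba: Pointer to HBA context object.
 *
 * This routine runs when a WORKER_RAMP_UP_QUEUE event has been posted. It
 * increases by one the queue depth of every scsi device on every vport that
 * is still below the configured lun_queue_depth, then clears the error and
 * success counters.
 **/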
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
/**
 * lpfc_scsi_dev_block: set all scsi hosts to block state.
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. It is invoked by EEH
 * when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
						sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys_fcp_cmd = psb->dma_handle;
	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

	/* Setup the physical region for the FCP RSP */
	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	if (phba->sli_rev == 3) {
		/* fill in immediate fcp command BDE */
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
						       unsli3.fcp_ext.icd);
		iocb->un.fcpi64.bdl.addrHigh = 0;
		iocb->ulpBdeCount = 0;
		iocb->ulpLe = 0;
		/* fill in response BDE */
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
						sizeof(struct fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrLow =
						putPaddrLow(pdma_phys_fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrHigh =
						putPaddrHigh(pdma_phys_fcp_rsp);
	} else {
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
	}
	iocb->ulpClass = CLASS3;
	return psb;
}
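/**
 * lpfc_get_scsi_buf: get a scsi buffer from the buffer list.
 * @phba: Pointer to HBA context object.
 *
 * Removes a previously allocated scsi buffer from the head of
 * lpfc_scsi_buf_list under scsi_buf_list_lock and resets its segment
 * count.  Returns NULL if the list is empty.
 **/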
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
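/**
 * lpfc_release_scsi_buf: return a scsi buffer to the buffer list.
 * @phba: Pointer to HBA context object.
 * @psb: Pointer to the scsi buffer being released.
 *
 * Clears the buffer's command pointer and adds it back to the tail of
 * lpfc_scsi_buf_list for reuse.
 **/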
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
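/**
 * lpfc_scsi_prep_dma_buf: DMA-map the scatter-gather list of a scsi command.
 * @phba: Pointer to HBA context object.
 * @lpfc_cmd: Pointer to the scsi buffer carrying the command.
 *
 * Maps the command's scatter-gather list and formats one BDE per mapped
 * segment, either directly in the extended SLI-3 IOCB or in the BPL, then
 * sets fcpDl to the total transfer length.  Returns non-zero on mapping
 * failure or when the segment count exceeds the configured limit.
 **/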
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB.  If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
	return 0;
}
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}
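/**
 * lpfc_handle_fcp_err: interpret the FCP response of a failed command.
 * @vport: Pointer to the virtual port object.
 * @lpfc_cmd: Pointer to the scsi buffer carrying the command.
 * @rsp_iocb: Pointer to the response IOCB.
 *
 * Examines the FCP RSP payload for sense data, response-length and residual
 * fields, detects underrun, overrun and dropped-frame conditions, and folds
 * the outcome into cmnd->result as a host status and scsi status pair.
 **/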
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if the under run reported by
		 * the storage array is the same as the under run reported by
		 * the HBA.  If they differ, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
		   (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}
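/**
 * lpfc_scsi_cmd_iocb_cmpl: completion handler for FCP command IOCBs.
 * @phba: Pointer to HBA context object.
 * @pIocbIn: Pointer to the command IOCB that is completing.
 * @pIocbOut: Pointer to the response IOCB.
 *
 * Translates the IOCB completion status into a midlayer result, logs
 * failures, adjusts the target and lun queue depths based on completion
 * time and queue-full conditions, unmaps the DMA buffer, calls scsi_done()
 * and releases the scsi buffer.
 **/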
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;
	unsigned long flags;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else
		cmd->result = ScsiResult(DID_OK, 0);

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	result = cmd->result;
	sdev = cmd->device;
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(sdev->host->host_lock, flags);
		if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
		    (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
		    ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
			pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);

		pnode->last_change_time = jiffies;
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
	} else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
		   time_after(jiffies, pnode->last_change_time +
			msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
		spin_lock_irqsave(sdev->host->host_lock, flags);
		pnode->cmd_qdepth += pnode->cmd_qdepth *
			LPFC_TGTQ_RAMPUP_PCENT / 100;
		if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
			pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
		pnode->last_change_time = jiffies;
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(sdev->host->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);

	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
	    ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;

	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
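/**
 * lpfc_scsi_prep_cmnd: build the FCP CMND and IOCB for a scsi command.
 * @vport: Pointer to the virtual port object.
 * @lpfc_cmd: Pointer to the scsi buffer carrying the command.
 * @pnode: Pointer to the node representing the remote target.
 *
 * Fills in the FCP LUN, CDB and task attribute, selects the read, write or
 * control IOCB command based on the data direction, and initializes the
 * remaining IOCB fields that do not depend on the request buffer.
 **/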
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer.
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
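/**
 * lpfc_scsi_prep_task_mgmt_cmd: build an FCP task management command.
 * @vport: Pointer to the virtual port object.
 * @lpfc_cmd: Pointer to the scsi buffer carrying the command.
 * @lun: LUN the task management command is directed at.
 * @task_mgmt_cmd: Task management command code (fcpCntl2 bits).
 *
 * Prepares the FCP CMND and IOCB for a task management request to a mapped
 * node.  Returns 1 on success and 0 if the node is not usable.
 **/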
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	return 1;
}
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
}
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;
	int status;

	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;

	lpfc_cmd->rdata = rdata;
	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					      FCP_TARGET_RESET);
	if (!status)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}
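/**
 * lpfc_info: return adapter description for the scsi midlayer.
 * @host: Pointer to the SCSI host this adapter is attached to.
 *
 * Formats the HBA model description, PCI bus address, irq and port name
 * into a static buffer for the midlayer's "info" callback.
 **/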
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

void lpfc_poll_start_timer(struct lpfc_hba *phba)
{
	lpfc_poll_rearm_timer(phba);
}

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
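/**
 * lpfc_queuecommand: scsi midlayer entry point to queue an FCP command.
 * @cmnd: Pointer to the scsi command to be sent.
 * @done: Midlayer completion callback, invoked when the command finishes.
 *
 * Validates the rport and node state, allocates and prepares a scsi buffer,
 * and issues the command on the FCP ring.  Returns 0 on successful
 * submission or SCSI_MLQUEUE_HOST_BUSY when resources are exhausted.
 **/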
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_host_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	atomic_dec(&ndlp->cmd_pending);
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
}
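/**
 * lpfc_abort_handler: scsi error handler entry point to abort one command.
 * @cmnd: Pointer to the scsi command to be aborted.
 *
 * Issues an ABORT_XRI_CN (or CLOSE_XRI_CN if the link is down) IOCB for the
 * in-flight command and waits up to twice the devloss timeout for the abort
 * to complete.  Returns SUCCESS or FAILED to the midlayer.
 **/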
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort.  There
	 * is no need to search the txcmplq.  Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}
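/**
 * lpfc_device_reset_handler: scsi error handler entry point for device reset.
 * @cmnd: Pointer to the scsi command that triggered the reset.
 *
 * Waits for the target to reach the MAPPED state, sends an FCP target reset
 * task management command, and then flushes any I/O still outstanding to the
 * target.  Returns SUCCESS or FAILED to the midlayer.
 **/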
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	unsigned long later;
	int ret = SUCCESS;
	int status;
	int cnt;

	lpfc_block_error_handler(cmnd);
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			break;
		pnode = rdata->pnode;
	}
	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0721 LUN Reset rport "
				 "failure: msec x%x rdata x%p\n",
				 jiffies_to_msecs(jiffies - later), rdata);
		return FAILED;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
					      cmnd->device->lun,
					      FCP_TARGET_RESET);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status == IOCB_TIMEDOUT) {
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		ret = TIMEOUT_ERROR;
	} else {
		if (status != IOCB_SUCCESS)
			ret = FAILED;
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, iocbqrsp);
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_TGT);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_TGT);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_TGT);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}
	return ret;
}
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = SUCCESS, status = SUCCESS, i;
	int cnt;
	struct lpfc_scsi_buf *lpfc_cmd;
	unsigned long later;

	lpfc_block_error_handler(cmnd);
	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;
		lpfc_cmd = lpfc_get_scsi_buf(phba);
		if (lpfc_cmd) {
			lpfc_cmd->timeout = 60;
			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
						     cmnd->device->lun,
						     ndlp->rport->dd_data);
			if (status != TIMEOUT_ERROR)
				lpfc_release_scsi_buf(phba, lpfc_cmd);
		}
		if (!lpfc_cmd || status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};