1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
32
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_compat.h"
41 #include "lpfc_debugfs.h"
42
43 /*
44  * Define a macro to log: Mailbox command x%x cannot issue Data.
45  * This allows multiple uses of lpfc_msgBlk0311 without perturbing
46  * the log message utility.
47  */
48 #define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
49                         lpfc_printf_log(phba, \
50                                 KERN_INFO, \
51                                 LOG_MBOX | LOG_SLI, \
52                                 "(%d):0311 Mailbox command x%x cannot " \
53                                 "issue Data: x%x x%x x%x\n", \
54                                 pmbox->vport ? pmbox->vport->vpi : 0, \
55                                 pmbox->mb.mbxCommand,           \
56                                 phba->pport->port_state,        \
57                                 psli->sli_flag, \
58                                 flag)
59
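/*
 * Illustrative sketch only, not part of the driver: a typical invocation
 * of the macro above from a mailbox-issue error path.  The locals
 * (phba, pmbox, psli, flag) are assumed to be in scope, as they are in
 * the mailbox-issue code referenced elsewhere in this file.
 */
#if 0
        /* Mailbox command cannot be issued right now; log it and bail out. */
        LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
#endif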
60
61 /* There are only four IOCB completion types. */
62 typedef enum _lpfc_iocb_type {
63         LPFC_UNKNOWN_IOCB,
64         LPFC_UNSOL_IOCB,
65         LPFC_SOL_IOCB,
66         LPFC_ABORT_IOCB
67 } lpfc_iocb_type;
68
69 /* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
70  * to the start of the ring, and the slot number of the
71  * desired iocb entry, calculate a pointer to that entry.
72  */
73 static inline IOCB_t *
74 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
75 {
76         return (IOCB_t *) (((char *) pring->cmdringaddr) +
77                            pring->cmdidx * phba->iocb_cmd_size);
78 }
79
80 static inline IOCB_t *
81 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
82 {
83         return (IOCB_t *) (((char *) pring->rspringaddr) +
84                            pring->rspidx * phba->iocb_rsp_size);
85 }
86
87 static struct lpfc_iocbq *
88 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
89 {
90         struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
91         struct lpfc_iocbq * iocbq = NULL;
92
93         list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
94         return iocbq;
95 }
96
97 struct lpfc_iocbq *
98 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
99 {
100         struct lpfc_iocbq * iocbq = NULL;
101         unsigned long iflags;
102
103         spin_lock_irqsave(&phba->hbalock, iflags);
104         iocbq = __lpfc_sli_get_iocbq(phba);
105         spin_unlock_irqrestore(&phba->hbalock, iflags);
106         return iocbq;
107 }
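
/*
 * Illustrative sketch only, not part of the driver: the usual allocate /
 * submit / release pairing for an iocbq.  lpfc_sli_issue_iocb() is the
 * driver's submission routine elsewhere in this file; the minimal error
 * handling shown here is an assumption, not the driver's actual policy.
 */
#if 0
static void lpfc_example_iocbq_usage(struct lpfc_hba *phba,
                                     struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *iocbq;

        iocbq = lpfc_sli_get_iocbq(phba);       /* takes hbalock internally */
        if (!iocbq)
                return;

        /* ... fill in iocbq->iocb and iocbq->iocb_cmpl here ... */

        if (lpfc_sli_issue_iocb(phba, pring, iocbq, 0) == IOCB_ERROR)
                lpfc_sli_release_iocbq(phba, iocbq);
}
#endif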
108
109 void
110 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
111 {
112         size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
113
114         /*
115          * Clean all volatile data fields, preserve iotag and node struct.
116          */
117         memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
118         list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
119 }
120
121 void
122 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
123 {
124         unsigned long iflags;
125
126         /*
127          * Clean all volatile data fields, preserve iotag and node struct.
128          */
129         spin_lock_irqsave(&phba->hbalock, iflags);
130         __lpfc_sli_release_iocbq(phba, iocbq);
131         spin_unlock_irqrestore(&phba->hbalock, iflags);
132 }
133
134 /*
135  * Translate the iocb command to an iocb command type used to decide the final
136  * disposition of each completed IOCB.
137  */
138 static lpfc_iocb_type
139 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
140 {
141         lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
142
143         if (iocb_cmnd > CMD_MAX_IOCB_CMD)
144                 return LPFC_UNKNOWN_IOCB;
145
146         switch (iocb_cmnd) {
147         case CMD_XMIT_SEQUENCE_CR:
148         case CMD_XMIT_SEQUENCE_CX:
149         case CMD_XMIT_BCAST_CN:
150         case CMD_XMIT_BCAST_CX:
151         case CMD_ELS_REQUEST_CR:
152         case CMD_ELS_REQUEST_CX:
153         case CMD_CREATE_XRI_CR:
154         case CMD_CREATE_XRI_CX:
155         case CMD_GET_RPI_CN:
156         case CMD_XMIT_ELS_RSP_CX:
157         case CMD_GET_RPI_CR:
158         case CMD_FCP_IWRITE_CR:
159         case CMD_FCP_IWRITE_CX:
160         case CMD_FCP_IREAD_CR:
161         case CMD_FCP_IREAD_CX:
162         case CMD_FCP_ICMND_CR:
163         case CMD_FCP_ICMND_CX:
164         case CMD_FCP_TSEND_CX:
165         case CMD_FCP_TRSP_CX:
166         case CMD_FCP_TRECEIVE_CX:
167         case CMD_FCP_AUTO_TRSP_CX:
168         case CMD_ADAPTER_MSG:
169         case CMD_ADAPTER_DUMP:
170         case CMD_XMIT_SEQUENCE64_CR:
171         case CMD_XMIT_SEQUENCE64_CX:
172         case CMD_XMIT_BCAST64_CN:
173         case CMD_XMIT_BCAST64_CX:
174         case CMD_ELS_REQUEST64_CR:
175         case CMD_ELS_REQUEST64_CX:
176         case CMD_FCP_IWRITE64_CR:
177         case CMD_FCP_IWRITE64_CX:
178         case CMD_FCP_IREAD64_CR:
179         case CMD_FCP_IREAD64_CX:
180         case CMD_FCP_ICMND64_CR:
181         case CMD_FCP_ICMND64_CX:
182         case CMD_FCP_TSEND64_CX:
183         case CMD_FCP_TRSP64_CX:
184         case CMD_FCP_TRECEIVE64_CX:
185         case CMD_GEN_REQUEST64_CR:
186         case CMD_GEN_REQUEST64_CX:
187         case CMD_XMIT_ELS_RSP64_CX:
188                 type = LPFC_SOL_IOCB;
189                 break;
190         case CMD_ABORT_XRI_CN:
191         case CMD_ABORT_XRI_CX:
192         case CMD_CLOSE_XRI_CN:
193         case CMD_CLOSE_XRI_CX:
194         case CMD_XRI_ABORTED_CX:
195         case CMD_ABORT_MXRI64_CN:
196                 type = LPFC_ABORT_IOCB;
197                 break;
198         case CMD_RCV_SEQUENCE_CX:
199         case CMD_RCV_ELS_REQ_CX:
200         case CMD_RCV_SEQUENCE64_CX:
201         case CMD_RCV_ELS_REQ64_CX:
202         case CMD_IOCB_RCV_SEQ64_CX:
203         case CMD_IOCB_RCV_ELS64_CX:
204         case CMD_IOCB_RCV_CONT64_CX:
205                 type = LPFC_UNSOL_IOCB;
206                 break;
207         default:
208                 type = LPFC_UNKNOWN_IOCB;
209                 break;
210         }
211
212         return type;
213 }
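
/*
 * Illustrative sketch only, not part of the driver: how a response handler
 * dispatches on the type returned by lpfc_sli_iocb_cmd_type().  The actions
 * in the comments are placeholders for the real handling done below.
 */
#if 0
        switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
        case LPFC_SOL_IOCB:
        case LPFC_ABORT_IOCB:
                /* look up the originating command iocb and complete it */
                break;
        case LPFC_UNSOL_IOCB:
                /* hand the frame to the unsolicited-event handler */
                break;
        default:        /* LPFC_UNKNOWN_IOCB */
                /* log and drop */
                break;
        }
#endif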
214
215 static int
216 lpfc_sli_ring_map(struct lpfc_hba *phba)
217 {
218         struct lpfc_sli *psli = &phba->sli;
219         LPFC_MBOXQ_t *pmb;
220         MAILBOX_t *pmbox;
221         int i, rc, ret = 0;
222
223         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
224         if (!pmb)
225                 return -ENOMEM;
226         pmbox = &pmb->mb;
227         phba->link_state = LPFC_INIT_MBX_CMDS;
228         for (i = 0; i < psli->num_rings; i++) {
229                 lpfc_config_ring(phba, i, pmb);
230                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
231                 if (rc != MBX_SUCCESS) {
232                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
233                                         "0446 Adapter failed to init (%d), "
234                                         "mbxCmd x%x CFG_RING, mbxStatus x%x, "
235                                         "ring %d\n",
236                                         rc, pmbox->mbxCommand,
237                                         pmbox->mbxStatus, i);
238                         phba->link_state = LPFC_HBA_ERROR;
239                         ret = -ENXIO;
240                         break;
241                 }
242         }
243         mempool_free(pmb, phba->mbox_mem_pool);
244         return ret;
245 }
246
247 static int
248 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
249                         struct lpfc_iocbq *piocb)
250 {
251         list_add_tail(&piocb->list, &pring->txcmplq);
252         pring->txcmplq_cnt++;
253         if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
254            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
255            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
256                 if (!piocb->vport)
257                         BUG();
258                 else
259                         mod_timer(&piocb->vport->els_tmofunc,
260                                   jiffies + HZ * (phba->fc_ratov << 1));
261         }
262
263
264         return 0;
265 }
266
267 static struct lpfc_iocbq *
268 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
269 {
270         struct lpfc_iocbq *cmd_iocb;
271
272         list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
273         if (cmd_iocb != NULL)
274                 pring->txq_cnt--;
275         return cmd_iocb;
276 }
277
278 static IOCB_t *
279 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
280 {
281         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
282                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
283                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
284         uint32_t  max_cmd_idx = pring->numCiocb;
285
286         if ((pring->next_cmdidx == pring->cmdidx) &&
287            (++pring->next_cmdidx >= max_cmd_idx))
288                 pring->next_cmdidx = 0;
289
290         if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
291
292                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
293
294                 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
295                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
296                                         "0315 Ring %d issue: portCmdGet %d "
297                                         "is bigger than cmd ring %d\n",
298                                         pring->ringno,
299                                         pring->local_getidx, max_cmd_idx);
300
301                         phba->link_state = LPFC_HBA_ERROR;
302                         /*
303                          * All error attention handlers are posted to
304                          * worker thread
305                          */
306                         phba->work_ha |= HA_ERATT;
307                         phba->work_hs = HS_FFER3;
308
309                         /* hbalock should already be held */
310                         if (phba->work_wait)
311                                 lpfc_worker_wake_up(phba);
312
313                         return NULL;
314                 }
315
316                 if (pring->local_getidx == pring->next_cmdidx)
317                         return NULL;
318         }
319
320         return lpfc_cmd_iocb(phba, pring);
321 }
322
323 uint16_t
324 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
325 {
326         struct lpfc_iocbq **new_arr;
327         struct lpfc_iocbq **old_arr;
328         size_t new_len;
329         struct lpfc_sli *psli = &phba->sli;
330         uint16_t iotag;
331
332         spin_lock_irq(&phba->hbalock);
333         iotag = psli->last_iotag;
334         if(++iotag < psli->iocbq_lookup_len) {
335                 psli->last_iotag = iotag;
336                 psli->iocbq_lookup[iotag] = iocbq;
337                 spin_unlock_irq(&phba->hbalock);
338                 iocbq->iotag = iotag;
339                 return iotag;
340         } else if (psli->iocbq_lookup_len < (0xffff
341                                            - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
342                 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
343                 spin_unlock_irq(&phba->hbalock);
344                 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
345                                   GFP_KERNEL);
346                 if (new_arr) {
347                         spin_lock_irq(&phba->hbalock);
348                         old_arr = psli->iocbq_lookup;
349                         if (new_len <= psli->iocbq_lookup_len) {
350                                 /* highly improbable case */
351                                 kfree(new_arr);
352                                 iotag = psli->last_iotag;
353                                 if(++iotag < psli->iocbq_lookup_len) {
354                                         psli->last_iotag = iotag;
355                                         psli->iocbq_lookup[iotag] = iocbq;
356                                         spin_unlock_irq(&phba->hbalock);
357                                         iocbq->iotag = iotag;
358                                         return iotag;
359                                 }
360                                 spin_unlock_irq(&phba->hbalock);
361                                 return 0;
362                         }
363                         if (psli->iocbq_lookup)
364                                 memcpy(new_arr, old_arr,
365                                        ((psli->last_iotag  + 1) *
366                                         sizeof (struct lpfc_iocbq *)));
367                         psli->iocbq_lookup = new_arr;
368                         psli->iocbq_lookup_len = new_len;
369                         psli->last_iotag = iotag;
370                         psli->iocbq_lookup[iotag] = iocbq;
371                         spin_unlock_irq(&phba->hbalock);
372                         iocbq->iotag = iotag;
373                         kfree(old_arr);
374                         return iotag;
375                 }
376         } else
377                 spin_unlock_irq(&phba->hbalock);
378
379         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
380                         "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
381                         psli->last_iotag);
382
383         return 0;
384 }
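
/*
 * Illustrative sketch only, not part of the driver: reserving an iotag for
 * a newly built iocb and the reverse lookup done at completion time.
 * "piocb", "cmdiocb" and the failure handling are placeholders.
 */
#if 0
        uint16_t iotag = lpfc_sli_next_iotag(phba, piocb);

        if (iotag == 0) {
                /* lookup table could not be grown; fail the request */
                lpfc_sli_release_iocbq(phba, piocb);
                return;
        }
        /* ... later, a completion with this iotag indexes straight back ... */
        cmdiocb = phba->sli.iocbq_lookup[iotag];
#endif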
385
386 static void
387 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
388                 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
389 {
390         /*
391          * Set up an iotag
392          */
393         nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
394
395         if (pring->ringno == LPFC_ELS_RING) {
396                 lpfc_debugfs_slow_ring_trc(phba,
397                         "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
398                         *(((uint32_t *) &nextiocb->iocb) + 4),
399                         *(((uint32_t *) &nextiocb->iocb) + 6),
400                         *(((uint32_t *) &nextiocb->iocb) + 7));
401         }
402
403         /*
404          * Issue iocb command to adapter
405          */
406         lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
407         wmb();
408         pring->stats.iocb_cmd++;
409
410         /*
411          * If there is no completion routine to call, we can release the
412          * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
413          * that have no rsp ring completion, iocb_cmpl MUST be NULL.
414          */
415         if (nextiocb->iocb_cmpl)
416                 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
417         else
418                 __lpfc_sli_release_iocbq(phba, nextiocb);
419
420         /*
421          * Let the HBA know what IOCB slot will be the next one the
422          * driver will put a command into.
423          */
424         pring->cmdidx = pring->next_cmdidx;
425         writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
426 }
427
428 static void
429 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
430 {
431         int ringno = pring->ringno;
432
433         pring->flag |= LPFC_CALL_RING_AVAILABLE;
434
435         wmb();
436
437         /*
438          * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
439          * The HBA will tell us when an IOCB entry is available.
440          */
441         writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
442         readl(phba->CAregaddr); /* flush */
443
444         pring->stats.iocb_cmd_full++;
445 }
446
447 static void
448 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
449 {
450         int ringno = pring->ringno;
451
452         /*
453          * Tell the HBA that there is work to do in this ring.
454          */
455         wmb();
456         writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
457         readl(phba->CAregaddr); /* flush */
458 }
459
460 static void
461 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
462 {
463         IOCB_t *iocb;
464         struct lpfc_iocbq *nextiocb;
465
466         /*
467          * Check to see if:
468          *  (a) there is anything on the txq to send
469          *  (b) link is up
470          *  (c) link attention events can be processed (fcp ring only)
471          *  (d) IOCB processing is not blocked by the outstanding mbox command.
472          */
473         if (pring->txq_cnt &&
474             lpfc_is_link_up(phba) &&
475             (pring->ringno != phba->sli.fcp_ring ||
476              phba->sli.sli_flag & LPFC_PROCESS_LA) &&
477             !(pring->flag & LPFC_STOP_IOCB_MBX)) {
478
479                 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
480                        (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
481                         lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
482
483                 if (iocb)
484                         lpfc_sli_update_ring(phba, pring);
485                 else
486                         lpfc_sli_update_full_ring(phba, pring);
487         }
488
489         return;
490 }
491
492 /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
493 static void
494 lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
495 {
496         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
497                 &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
498                 &phba->slim2p->mbx.us.s2.port[ringno];
499         unsigned long iflags;
500
501         /* If the ring is active, flag it */
502         spin_lock_irqsave(&phba->hbalock, iflags);
503         if (phba->sli.ring[ringno].cmdringaddr) {
504                 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
505                         phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
506                         /*
507                          * Force update of the local copy of cmdGetInx
508                          */
509                         phba->sli.ring[ringno].local_getidx
510                                 = le32_to_cpu(pgp->cmdGetInx);
511                         lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
512                 }
513         }
514         spin_unlock_irqrestore(&phba->hbalock, iflags);
515 }
516
517 struct lpfc_hbq_entry *
518 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
519 {
520         struct hbq_s *hbqp = &phba->hbqs[hbqno];
521
522         if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
523             ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
524                 hbqp->next_hbqPutIdx = 0;
525
526         if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
527                 uint32_t raw_index = phba->hbq_get[hbqno];
528                 uint32_t getidx = le32_to_cpu(raw_index);
529
530                 hbqp->local_hbqGetIdx = getidx;
531
532                 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
533                         lpfc_printf_log(phba, KERN_ERR,
534                                         LOG_SLI | LOG_VPORT,
535                                         "1802 HBQ %d: local_hbqGetIdx "
536                                         "%u is greater than hbqp->entry_count %u\n",
537                                         hbqno, hbqp->local_hbqGetIdx,
538                                         hbqp->entry_count);
539
540                         phba->link_state = LPFC_HBA_ERROR;
541                         return NULL;
542                 }
543
544                 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
545                         return NULL;
546         }
547
548         return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
549                         hbqp->hbqPutIdx;
550 }
551
552 void
553 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
554 {
555         struct lpfc_dmabuf *dmabuf, *next_dmabuf;
556         struct hbq_dmabuf *hbq_buf;
557         int i, hbq_count;
558
559         hbq_count = lpfc_sli_hbq_count();
560         /* Return all memory used by all HBQs */
561         for (i = 0; i < hbq_count; ++i) {
562                 list_for_each_entry_safe(dmabuf, next_dmabuf,
563                                 &phba->hbqs[i].hbq_buffer_list, list) {
564                         hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
565                         list_del(&hbq_buf->dbuf.list);
566                         (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
567                 }
568         }
569 }
570
571 static struct lpfc_hbq_entry *
572 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
573                          struct hbq_dmabuf *hbq_buf)
574 {
575         struct lpfc_hbq_entry *hbqe;
576         dma_addr_t physaddr = hbq_buf->dbuf.phys;
577
578         /* Get next HBQ entry slot to use */
579         hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
580         if (hbqe) {
581                 struct hbq_s *hbqp = &phba->hbqs[hbqno];
582
583                 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
584                 hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
585                 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
586                 hbqe->bde.tus.f.bdeFlags = 0;
587                 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
588                 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
589                                 /* Sync SLIM */
590                 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
591                 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
592                                 /* flush */
593                 readl(phba->hbq_put + hbqno);
594                 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
595         }
596         return hbqe;
597 }
598
599 static struct lpfc_hbq_init lpfc_els_hbq = {
600         .rn = 1,
601         .entry_count = 200,
602         .mask_count = 0,
603         .profile = 0,
604         .ring_mask = (1 << LPFC_ELS_RING),
605         .buffer_count = 0,
606         .init_count = 20,
607         .add_count = 5,
608 };
609
610 static struct lpfc_hbq_init lpfc_extra_hbq = {
611         .rn = 1,
612         .entry_count = 200,
613         .mask_count = 0,
614         .profile = 0,
615         .ring_mask = (1 << LPFC_EXTRA_RING),
616         .buffer_count = 0,
617         .init_count = 0,
618         .add_count = 5,
619 };
620
621 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
622         &lpfc_els_hbq,
623         &lpfc_extra_hbq,
624 };
625
626 static int
627 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
628 {
629         uint32_t i, start, end;
630         struct hbq_dmabuf *hbq_buffer;
631
632         if (!phba->hbqs[hbqno].hbq_alloc_buffer) {
633                 return 0;
634         }
635
636         start = lpfc_hbq_defs[hbqno]->buffer_count;
637         end = count + lpfc_hbq_defs[hbqno]->buffer_count;
638         if (end > lpfc_hbq_defs[hbqno]->entry_count) {
639                 end = lpfc_hbq_defs[hbqno]->entry_count;
640         }
641
642         /* Populate HBQ entries */
643         for (i = start; i < end; i++) {
644                 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
645                 if (!hbq_buffer)
646                         return 1;
647                 hbq_buffer->tag = (i | (hbqno << 16));
648                 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
649                         lpfc_hbq_defs[hbqno]->buffer_count++;
650                 else
651                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
652         }
653         return 0;
654 }
655
656 int
657 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
658 {
659         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
660                                          lpfc_hbq_defs[qno]->add_count));
661 }
662
663 int
664 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
665 {
666         return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
667                                          lpfc_hbq_defs[qno]->init_count));
668 }
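
/*
 * Illustrative sketch only, not part of the driver: initial population of
 * every defined HBQ from its profile in lpfc_hbq_defs[], as HBA setup code
 * would do.  lpfc_sli_hbq_count() is the same helper used above in
 * lpfc_sli_hbqbuf_free_all().
 */
#if 0
        int qno;

        for (qno = 0; qno < lpfc_sli_hbq_count(); qno++)
                if (lpfc_sli_hbqbuf_init_hbqs(phba, qno))
                        break;  /* buffer allocation failed */
#endif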
669
670 struct hbq_dmabuf *
671 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
672 {
673         struct lpfc_dmabuf *d_buf;
674         struct hbq_dmabuf *hbq_buf;
675         uint32_t hbqno;
676
677         hbqno = tag >> 16;
678         if (hbqno >= LPFC_MAX_HBQS)
679                 return NULL;
680
681         list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
682                 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
683                 if (hbq_buf->tag == tag) {
684                         return hbq_buf;
685                 }
686         }
687         lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
688                         "1803 Bad hbq tag. Data: x%x x%x\n",
689                         tag, lpfc_hbq_defs[tag >> 16]->buffer_count);
690         return NULL;
691 }
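
/*
 * Illustrative sketch only, not part of the driver: the buffer tag layout
 * that lpfc_sli_hbqbuf_find() relies on.  The high 16 bits carry the HBQ
 * number and the low 16 bits the buffer index, exactly as encoded in
 * lpfc_sli_hbqbuf_fill_hbqs() above.
 */
#if 0
        uint32_t hbqno = tag >> 16;             /* which HBQ */
        uint32_t index = tag & 0xffff;          /* buffer slot within that HBQ */
#endif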
692
693 void
694 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
695 {
696         uint32_t hbqno;
697
698         if (hbq_buffer) {
699                 hbqno = hbq_buffer->tag >> 16;
700                 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
701                         (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
702                 }
703         }
704 }
705
706 static int
707 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
708 {
709         uint8_t ret;
710
711         switch (mbxCommand) {
712         case MBX_LOAD_SM:
713         case MBX_READ_NV:
714         case MBX_WRITE_NV:
715         case MBX_RUN_BIU_DIAG:
716         case MBX_INIT_LINK:
717         case MBX_DOWN_LINK:
718         case MBX_CONFIG_LINK:
719         case MBX_CONFIG_RING:
720         case MBX_RESET_RING:
721         case MBX_READ_CONFIG:
722         case MBX_READ_RCONFIG:
723         case MBX_READ_SPARM:
724         case MBX_READ_STATUS:
725         case MBX_READ_RPI:
726         case MBX_READ_XRI:
727         case MBX_READ_REV:
728         case MBX_READ_LNK_STAT:
729         case MBX_REG_LOGIN:
730         case MBX_UNREG_LOGIN:
731         case MBX_READ_LA:
732         case MBX_CLEAR_LA:
733         case MBX_DUMP_MEMORY:
734         case MBX_DUMP_CONTEXT:
735         case MBX_RUN_DIAGS:
736         case MBX_RESTART:
737         case MBX_UPDATE_CFG:
738         case MBX_DOWN_LOAD:
739         case MBX_DEL_LD_ENTRY:
740         case MBX_RUN_PROGRAM:
741         case MBX_SET_MASK:
742         case MBX_SET_SLIM:
743         case MBX_UNREG_D_ID:
744         case MBX_KILL_BOARD:
745         case MBX_CONFIG_FARP:
746         case MBX_BEACON:
747         case MBX_LOAD_AREA:
748         case MBX_RUN_BIU_DIAG64:
749         case MBX_CONFIG_PORT:
750         case MBX_READ_SPARM64:
751         case MBX_READ_RPI64:
752         case MBX_REG_LOGIN64:
753         case MBX_READ_LA64:
754         case MBX_FLASH_WR_ULA:
755         case MBX_SET_DEBUG:
756         case MBX_LOAD_EXP_ROM:
757         case MBX_REG_VPI:
758         case MBX_UNREG_VPI:
759         case MBX_HEARTBEAT:
760                 ret = mbxCommand;
761                 break;
762         default:
763                 ret = MBX_SHUTDOWN;
764                 break;
765         }
766         return ret;
767 }
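
/*
 * Illustrative sketch only, not part of the driver: rejecting a mailbox
 * command before it is issued, using the checker above.  The early return
 * shown is an assumption, not the driver's actual policy.
 */
#if 0
        if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
                /* command is not on the supported list; refuse to issue it */
                return MBX_NOT_FINISHED;
        }
#endif
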
768 static void
769 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
770 {
771         wait_queue_head_t *pdone_q;
772         unsigned long drvr_flag;
773
774         /*
775          * If pdone_q is empty, the driver thread gave up waiting and
776          * continued running.
777          */
778         pmboxq->mbox_flag |= LPFC_MBX_WAKE;
779         spin_lock_irqsave(&phba->hbalock, drvr_flag);
780         pdone_q = (wait_queue_head_t *) pmboxq->context1;
781         if (pdone_q)
782                 wake_up_interruptible(pdone_q);
783         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
784         return;
785 }
786
787 void
788 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
789 {
790         struct lpfc_dmabuf *mp;
791         uint16_t rpi;
792         int rc;
793
794         mp = (struct lpfc_dmabuf *) (pmb->context1);
795
796         if (mp) {
797                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
798                 kfree(mp);
799         }
800
801         /*
802          * If a REG_LOGIN succeeded after the node was destroyed or the
803          * node is in re-discovery, the driver needs to clean up the RPI.
804          */
805         if (!(phba->pport->load_flag & FC_UNLOADING) &&
806             pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
807             !pmb->mb.mbxStatus) {
808
809                 rpi = pmb->mb.un.varWords[0];
810                 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
811                 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
812                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
813                 if (rc != MBX_NOT_FINISHED)
814                         return;
815         }
816
817         mempool_free(pmb, phba->mbox_mem_pool);
818         return;
819 }
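
/*
 * Illustrative sketch only, not part of the driver: issuing a fire-and-
 * forget mailbox command whose only cleanup is the default completion
 * handler above.  "vpi" and "rpi" stand in for values the caller already
 * has; lpfc_unreg_login() and lpfc_sli_issue_mbox() are used the same way
 * inside lpfc_sli_def_mbox_cmpl() itself.
 */
#if 0
        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (pmb) {
                lpfc_unreg_login(phba, vpi, rpi, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) ==
                    MBX_NOT_FINISHED)
                        mempool_free(pmb, phba->mbox_mem_pool);
        }
#endif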
820
821 int
822 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
823 {
824         MAILBOX_t *pmbox;
825         LPFC_MBOXQ_t *pmb;
826         int rc;
827         LIST_HEAD(cmplq);
828
829         phba->sli.slistat.mbox_event++;
830
831         /* Get all completed mailbox buffers into the cmplq */
832         spin_lock_irq(&phba->hbalock);
833         list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
834         spin_unlock_irq(&phba->hbalock);
835
836         /* Get a Mailbox buffer to setup mailbox commands for callback */
837         do {
838                 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
839                 if (pmb == NULL)
840                         break;
841
842                 pmbox = &pmb->mb;
843
844                 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
845                         if (pmb->vport) {
846                                 lpfc_debugfs_disc_trc(pmb->vport,
847                                         LPFC_DISC_TRC_MBOX_VPORT,
848                                         "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
849                                         (uint32_t)pmbox->mbxCommand,
850                                         pmbox->un.varWords[0],
851                                         pmbox->un.varWords[1]);
852                         }
853                         else {
854                                 lpfc_debugfs_disc_trc(phba->pport,
855                                         LPFC_DISC_TRC_MBOX,
856                                         "MBOX cmpl:       cmd:x%x mb:x%x x%x",
857                                         (uint32_t)pmbox->mbxCommand,
858                                         pmbox->un.varWords[0],
859                                         pmbox->un.varWords[1]);
860                         }
861                 }
862
863                 /*
864                  * It is a fatal error if an unknown mailbox command completes.
865                  */
866                 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
867                     MBX_SHUTDOWN) {
868                         /* Unknown mailbox command completion */
869                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
870                                         "(%d):0323 Unknown Mailbox command "
871                                         "%x Cmpl\n",
872                                         pmb->vport ? pmb->vport->vpi : 0,
873                                         pmbox->mbxCommand);
874                         phba->link_state = LPFC_HBA_ERROR;
875                         phba->work_hs = HS_FFER3;
876                         lpfc_handle_eratt(phba);
877                         continue;
878                 }
879
880                 if (pmbox->mbxStatus) {
881                         phba->sli.slistat.mbox_stat_err++;
882                         if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
883                                 /* Mbox cmd cmpl error - RETRYing */
884                                 lpfc_printf_log(phba, KERN_INFO,
885                                                 LOG_MBOX | LOG_SLI,
886                                                 "(%d):0305 Mbox cmd cmpl "
887                                                 "error - RETRYing Data: x%x "
888                                                 "x%x x%x x%x\n",
889                                                 pmb->vport ? pmb->vport->vpi : 0,
890                                                 pmbox->mbxCommand,
891                                                 pmbox->mbxStatus,
892                                                 pmbox->un.varWords[0],
893                                                 pmb->vport ? pmb->vport->port_state : 0);
894                                 pmbox->mbxStatus = 0;
895                                 pmbox->mbxOwner = OWN_HOST;
896                                 spin_lock_irq(&phba->hbalock);
897                                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
898                                 spin_unlock_irq(&phba->hbalock);
899                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
900                                 if (rc == MBX_SUCCESS)
901                                         continue;
902                         }
903                 }
904
905                 /* Mailbox cmd <cmd> Cmpl <cmpl> */
906                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
907                                 "(%d):0307 Mailbox cmd x%x Cmpl x%p "
908                                 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
909                                 pmb->vport ? pmb->vport->vpi : 0,
910                                 pmbox->mbxCommand,
911                                 pmb->mbox_cmpl,
912                                 *((uint32_t *) pmbox),
913                                 pmbox->un.varWords[0],
914                                 pmbox->un.varWords[1],
915                                 pmbox->un.varWords[2],
916                                 pmbox->un.varWords[3],
917                                 pmbox->un.varWords[4],
918                                 pmbox->un.varWords[5],
919                                 pmbox->un.varWords[6],
920                                 pmbox->un.varWords[7]);
921
922                 if (pmb->mbox_cmpl)
923                         pmb->mbox_cmpl(phba,pmb);
924         } while (1);
925         return 0;
926 }
927
928 static struct lpfc_dmabuf *
929 lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
930 {
931         struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
932         uint32_t hbqno;
933         void *virt;             /* virtual address ptr */
934         dma_addr_t phys;        /* mapped address */
935
936         hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
937         if (hbq_entry == NULL)
938                 return NULL;
939         list_del(&hbq_entry->dbuf.list);
940
941         hbqno = tag >> 16;
942         new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
943         if (new_hbq_entry == NULL)
944                 return &hbq_entry->dbuf;
945         new_hbq_entry->tag = -1;
946         phys = new_hbq_entry->dbuf.phys;
947         virt = new_hbq_entry->dbuf.virt;
948         new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
949         new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
950         hbq_entry->dbuf.phys = phys;
951         hbq_entry->dbuf.virt = virt;
952         lpfc_sli_free_hbq(phba, hbq_entry);
953         return &new_hbq_entry->dbuf;
954 }
955
956 static int
957 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
958                             struct lpfc_iocbq *saveq)
959 {
960         IOCB_t           * irsp;
961         WORD5            * w5p;
962         uint32_t           Rctl, Type;
963         uint32_t           match, i;
964
965         match = 0;
966         irsp = &(saveq->iocb);
967         if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
968             || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
969             || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
970             || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
971                 Rctl = FC_ELS_REQ;
972                 Type = FC_ELS_DATA;
973         } else {
974                 w5p = (WORD5 *) &(saveq->iocb.un.ulpWord[5]);
977                 Rctl = w5p->hcsw.Rctl;
978                 Type = w5p->hcsw.Type;
979
980                 /* Firmware Workaround */
981                 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
982                         (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
983                          irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
984                         Rctl = FC_ELS_REQ;
985                         Type = FC_ELS_DATA;
986                         w5p->hcsw.Rctl = Rctl;
987                         w5p->hcsw.Type = Type;
988                 }
989         }
990
991         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
992                 if (irsp->ulpBdeCount != 0)
993                         saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
994                                                 irsp->un.ulpWord[3]);
995                 if (irsp->ulpBdeCount == 2)
996                         saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
997                                                 irsp->unsli3.sli3Words[7]);
998         }
999
1000         /* Unsolicited responses */
1001         if (pring->prt[0].profile) {
1002                 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1003                         (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1004                                                                         saveq);
1005                 match = 1;
1006         } else {
1007                 /* We must search, based on rctl / type,
1008                    for the right routine. */
1009                 for (i = 0; i < pring->num_mask; i++) {
1010                         if ((pring->prt[i].rctl == Rctl) &&
1011                             (pring->prt[i].type == Type)) {
1015                                 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1016                                         (pring->prt[i].lpfc_sli_rcv_unsol_event)
1017                                                         (phba, pring, saveq);
1018                                 match = 1;
1019                                 break;
1020                         }
1021                 }
1022         }
1023         if (match == 0) {
1024                 /* Unexpected Rctl / Type received */
1025                 /* Ring <ringno> handler: unexpected
1026                    Rctl <Rctl> Type <Type> received */
1027                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1028                                 "0313 Ring %d handler: unexpected Rctl x%x "
1029                                 "Type x%x received\n",
1030                                 pring->ringno, Rctl, Type);
1031         }
1032         return 1;
1033 }
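
/*
 * Illustrative sketch only, not part of the driver: how a ring's rctl/type
 * mask table is wired up so the search loop above can find a handler.  The
 * real table is filled in by the ring setup code elsewhere in the driver;
 * lpfc_els_unsol_event() is the ELS handler declared in lpfc_crtn.h.
 */
#if 0
        pring->prt[0].profile = 0;      /* 0 means "match on rctl/type" */
        pring->prt[0].rctl = FC_ELS_REQ;
        pring->prt[0].type = FC_ELS_DATA;
        pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
        pring->num_mask = 1;
#endif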
1034
1035 static struct lpfc_iocbq *
1036 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
1037                       struct lpfc_sli_ring *pring,
1038                       struct lpfc_iocbq *prspiocb)
1039 {
1040         struct lpfc_iocbq *cmd_iocb = NULL;
1041         uint16_t iotag;
1042
1043         iotag = prspiocb->iocb.ulpIoTag;
1044
1045         if (iotag != 0 && iotag <= phba->sli.last_iotag) {
1046                 cmd_iocb = phba->sli.iocbq_lookup[iotag];
1047                 list_del_init(&cmd_iocb->list);
1048                 pring->txcmplq_cnt--;
1049                 return cmd_iocb;
1050         }
1051
1052         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1053                         "0317 iotag x%x is out of "
1054                         "range: max iotag x%x wd0 x%x\n",
1055                         iotag, phba->sli.last_iotag,
1056                         *(((uint32_t *) &prspiocb->iocb) + 7));
1057         return NULL;
1058 }
1059
1060 static int
1061 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1062                           struct lpfc_iocbq *saveq)
1063 {
1064         struct lpfc_iocbq *cmdiocbp;
1065         int rc = 1;
1066         unsigned long iflag;
1067
1068         /* Based on the iotag field, get the cmd IOCB from the txcmplq */
1069         spin_lock_irqsave(&phba->hbalock, iflag);
1070         cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
1071         spin_unlock_irqrestore(&phba->hbalock, iflag);
1072
1073         if (cmdiocbp) {
1074                 if (cmdiocbp->iocb_cmpl) {
1075                         /*
1076                          * Post all ELS completions to the worker thread.
1077                          * All other are passed to the completion callback.
1078                          */
1079                         if (pring->ringno == LPFC_ELS_RING) {
1080                                 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
1081                                         cmdiocbp->iocb_flag &=
1082                                                 ~LPFC_DRIVER_ABORTED;
1083                                         saveq->iocb.ulpStatus =
1084                                                 IOSTAT_LOCAL_REJECT;
1085                                         saveq->iocb.un.ulpWord[4] =
1086                                                 IOERR_SLI_ABORTED;
1087                                 }
1088                         }
1089                         (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
1090                 } else
1091                         lpfc_sli_release_iocbq(phba, cmdiocbp);
1092         } else {
1093                 /*
1094                  * Unknown initiating command based on the response iotag.
1095                  * This could be the case on the ELS ring because of
1096                  * lpfc_els_abort().
1097                  */
1098                 if (pring->ringno != LPFC_ELS_RING) {
1099                         /*
1100                          * Ring <ringno> handler: unexpected completion IoTag
1101                          * <IoTag>
1102                          */
1103                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1104                                          "0322 Ring %d handler: "
1105                                          "unexpected completion IoTag x%x "
1106                                          "Data: x%x x%x x%x x%x\n",
1107                                          pring->ringno,
1108                                          saveq->iocb.ulpIoTag,
1109                                          saveq->iocb.ulpStatus,
1110                                          saveq->iocb.un.ulpWord[4],
1111                                          saveq->iocb.ulpCommand,
1112                                          saveq->iocb.ulpContext);
1113                 }
1114         }
1115
1116         return rc;
1117 }
1118
1119 static void
1120 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1121 {
1122         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1123                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1124                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1125         /*
1126          * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1127          * rsp ring <portRspMax>
1128          */
1129         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1130                         "0312 Ring %d handler: portRspPut %d "
1131                         "is bigger than rsp ring %d\n",
1132                         pring->ringno, le32_to_cpu(pgp->rspPutInx),
1133                         pring->numRiocb);
1134
1135         phba->link_state = LPFC_HBA_ERROR;
1136
1137         /*
1138          * All error attention handlers are posted to
1139          * worker thread
1140          */
1141         phba->work_ha |= HA_ERATT;
1142         phba->work_hs = HS_FFER3;
1143
1144         /* hbalock should already be held */
1145         if (phba->work_wait)
1146                 lpfc_worker_wake_up(phba);
1147
1148         return;
1149 }
1150
1151 void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
1152 {
1153         struct lpfc_sli      *psli  = &phba->sli;
1154         struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
1155         IOCB_t *irsp = NULL;
1156         IOCB_t *entry = NULL;
1157         struct lpfc_iocbq *cmdiocbq = NULL;
1158         struct lpfc_iocbq rspiocbq;
1159         struct lpfc_pgp *pgp;
1160         uint32_t status;
1161         uint32_t portRspPut, portRspMax;
1162         int type;
1163         uint32_t rsp_cmpl = 0;
1164         uint32_t ha_copy;
1165         unsigned long iflags;
1166
1167         pring->stats.iocb_event++;
1168
1169         pgp = (phba->sli_rev == 3) ?
1170                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1171                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1172
1173
1174         /*
1175          * The next available response entry should never exceed the maximum
1176          * entries.  If it does, treat it as an adapter hardware error.
1177          */
1178         portRspMax = pring->numRiocb;
1179         portRspPut = le32_to_cpu(pgp->rspPutInx);
1180         if (unlikely(portRspPut >= portRspMax)) {
1181                 lpfc_sli_rsp_pointers_error(phba, pring);
1182                 return;
1183         }
1184
1185         rmb();
1186         while (pring->rspidx != portRspPut) {
1187                 entry = lpfc_resp_iocb(phba, pring);
1188                 if (++pring->rspidx >= portRspMax)
1189                         pring->rspidx = 0;
1190
1191                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1192                                       (uint32_t *) &rspiocbq.iocb,
1193                                       phba->iocb_rsp_size);
1194                 irsp = &rspiocbq.iocb;
1195                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1196                 pring->stats.iocb_rsp++;
1197                 rsp_cmpl++;
1198
1199                 if (unlikely(irsp->ulpStatus)) {
1200                         /* Rsp ring <ringno> error: IOCB */
1201                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1202                                         "0326 Rsp Ring %d error: IOCB Data: "
1203                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1204                                         pring->ringno,
1205                                         irsp->un.ulpWord[0],
1206                                         irsp->un.ulpWord[1],
1207                                         irsp->un.ulpWord[2],
1208                                         irsp->un.ulpWord[3],
1209                                         irsp->un.ulpWord[4],
1210                                         irsp->un.ulpWord[5],
1211                                         *(((uint32_t *) irsp) + 6),
1212                                         *(((uint32_t *) irsp) + 7));
1213                 }
1214
1215                 switch (type) {
1216                 case LPFC_ABORT_IOCB:
1217                 case LPFC_SOL_IOCB:
1218                         /*
1219                          * Idle exchange closed via ABTS from port.  No iocb
1220                          * resources need to be recovered.
1221                          */
1222                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1223                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1224                                                 "0314 IOCB cmd 0x%x "
1225                                                 "processed. Skipping "
1226                                                 "completion",
1227                                                 "completion\n",
1228                                 break;
1229                         }
1230
1231                         spin_lock_irqsave(&phba->hbalock, iflags);
1232                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1233                                                          &rspiocbq);
1234                         spin_unlock_irqrestore(&phba->hbalock, iflags);
1235                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1236                                 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1237                                                       &rspiocbq);
1238                         }
1239                         break;
1240                 default:
1241                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1242                                 char adaptermsg[LPFC_MAX_ADPTMSG];
1243                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1244                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1245                                        MAX_MSG_DATA);
1246                                 dev_warn(&((phba->pcidev)->dev),
1247                                          "lpfc%d: %s\n",
1248                                          phba->brd_no, adaptermsg);
1249                         } else {
1250                                 /* Unknown IOCB command */
1251                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1252                                                 "0321 Unknown IOCB command "
1253                                                 "Data: x%x, x%x x%x x%x x%x\n",
1254                                                 type, irsp->ulpCommand,
1255                                                 irsp->ulpStatus,
1256                                                 irsp->ulpIoTag,
1257                                                 irsp->ulpContext);
1258                         }
1259                         break;
1260                 }
1261
1262                 /*
1263                  * The response IOCB has been processed.  Update the ring
1264                  * pointer in SLIM.  If the port response put pointer has not
1265                  * been updated, sync the pgp->rspPutInx and fetch the new port
1266                  * response put pointer.
1267                  */
1268                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1269
1270                 if (pring->rspidx == portRspPut)
1271                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1272         }
1273
1274         ha_copy = readl(phba->HAregaddr);
1275         ha_copy >>= (LPFC_FCP_RING * 4);
1276
1277         if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1278                 spin_lock_irqsave(&phba->hbalock, iflags);
1279                 pring->stats.iocb_rsp_full++;
1280                 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1281                 writel(status, phba->CAregaddr);
1282                 readl(phba->CAregaddr);
1283                 spin_unlock_irqrestore(&phba->hbalock, iflags);
1284         }
1285         if ((ha_copy & HA_R0CE_RSP) &&
1286             (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1287                 spin_lock_irqsave(&phba->hbalock, iflags);
1288                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1289                 pring->stats.iocb_cmd_empty++;
1290
1291                 /* Force update of the local copy of cmdGetInx */
1292                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1293                 lpfc_sli_resume_iocb(phba, pring);
1294
1295                 if ((pring->lpfc_sli_cmd_available))
1296                         (pring->lpfc_sli_cmd_available) (phba, pring);
1297
1298                 spin_unlock_irqrestore(&phba->hbalock, iflags);
1299         }
1300
1301         return;
1302 }
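
/*
 * Illustrative sketch only, not part of the driver: draining the FCP ring
 * by polling when interrupt-driven completion is turned off.  phba->cfg_poll
 * and ENABLE_FCP_RING_POLLING are the knobs checked elsewhere in this file;
 * the surrounding wait loop is a placeholder.
 */
#if 0
        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING)
                while (io_still_outstanding)    /* placeholder condition */
                        lpfc_sli_poll_fcp_ring(phba);
#endif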
1303
1304 /*
1305  * This routine presumes LPFC_FCP_RING handling and doesn't bother
1306  * to check it explicitly.
1307  */
1308 static int
1309 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1310                                 struct lpfc_sli_ring *pring, uint32_t mask)
1311 {
1312         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1313                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1314                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1315         IOCB_t *irsp = NULL;
1316         IOCB_t *entry = NULL;
1317         struct lpfc_iocbq *cmdiocbq = NULL;
1318         struct lpfc_iocbq rspiocbq;
1319         uint32_t status;
1320         uint32_t portRspPut, portRspMax;
1321         int rc = 1;
1322         lpfc_iocb_type type;
1323         unsigned long iflag;
1324         uint32_t rsp_cmpl = 0;
1325
1326         spin_lock_irqsave(&phba->hbalock, iflag);
1327         pring->stats.iocb_event++;
1328
1329         /*
1330          * The next available response entry should never exceed the maximum
1331          * entries.  If it does, treat it as an adapter hardware error.
1332          */
1333         portRspMax = pring->numRiocb;
1334         portRspPut = le32_to_cpu(pgp->rspPutInx);
1335         if (unlikely(portRspPut >= portRspMax)) {
1336                 lpfc_sli_rsp_pointers_error(phba, pring);
1337                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1338                 return 1;
1339         }
1340
1341         rmb();
1342         while (pring->rspidx != portRspPut) {
1343                 /*
1344                  * Fetch an entry off the ring and copy it into a local data
1345                  * structure.  The copy involves a byte-swap since the
1346                  * network byte order and pci byte orders are different.
1347                  */
1348                 entry = lpfc_resp_iocb(phba, pring);
1349                 phba->last_completion_time = jiffies;
1350
1351                 if (++pring->rspidx >= portRspMax)
1352                         pring->rspidx = 0;
1353
1354                 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1355                                       (uint32_t *) &rspiocbq.iocb,
1356                                       phba->iocb_rsp_size);
1357                 INIT_LIST_HEAD(&(rspiocbq.list));
1358                 irsp = &rspiocbq.iocb;
1359
1360                 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1361                 pring->stats.iocb_rsp++;
1362                 rsp_cmpl++;
1363
1364                 if (unlikely(irsp->ulpStatus)) {
1365                         /*
1366                          * If resource errors reported from HBA, reduce
1367                          * queuedepths of the SCSI device.
1368                          */
1369                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1370                                 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1371                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1372                                 lpfc_adjust_queue_depth(phba);
1373                                 spin_lock_irqsave(&phba->hbalock, iflag);
1374                         }
1375
1376                         /* Rsp ring <ringno> error: IOCB */
1377                         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1378                                         "0336 Rsp Ring %d error: IOCB Data: "
1379                                         "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1380                                         pring->ringno,
1381                                         irsp->un.ulpWord[0],
1382                                         irsp->un.ulpWord[1],
1383                                         irsp->un.ulpWord[2],
1384                                         irsp->un.ulpWord[3],
1385                                         irsp->un.ulpWord[4],
1386                                         irsp->un.ulpWord[5],
1387                                         *(((uint32_t *) irsp) + 6),
1388                                         *(((uint32_t *) irsp) + 7));
1389                 }
1390
1391                 switch (type) {
1392                 case LPFC_ABORT_IOCB:
1393                 case LPFC_SOL_IOCB:
1394                         /*
1395                          * Idle exchange closed via ABTS from port.  No iocb
1396                          * resources need to be recovered.
1397                          */
1398                         if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1399                                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1400                                                 "0333 IOCB cmd 0x%x"
1401                                                 " processed. Skipping"
1402                                                 " completion\n",
1403                                                 irsp->ulpCommand);
1404                                 break;
1405                         }
1406
1407                         cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1408                                                          &rspiocbq);
1409                         if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1410                                 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1411                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1412                                                               &rspiocbq);
1413                                 } else {
1414                                         spin_unlock_irqrestore(&phba->hbalock,
1415                                                                iflag);
1416                                         (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1417                                                               &rspiocbq);
1418                                         spin_lock_irqsave(&phba->hbalock,
1419                                                           iflag);
1420                                 }
1421                         }
1422                         break;
1423                 case LPFC_UNSOL_IOCB:
1424                         spin_unlock_irqrestore(&phba->hbalock, iflag);
1425                         lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1426                         spin_lock_irqsave(&phba->hbalock, iflag);
1427                         break;
1428                 default:
1429                         if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1430                                 char adaptermsg[LPFC_MAX_ADPTMSG];
1431                                 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1432                                 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1433                                        MAX_MSG_DATA);
1434                                 dev_warn(&((phba->pcidev)->dev),
1435                                          "lpfc%d: %s\n",
1436                                          phba->brd_no, adaptermsg);
1437                         } else {
1438                                 /* Unknown IOCB command */
1439                                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1440                                                 "0334 Unknown IOCB command "
1441                                                 "Data: x%x, x%x x%x x%x x%x\n",
1442                                                 type, irsp->ulpCommand,
1443                                                 irsp->ulpStatus,
1444                                                 irsp->ulpIoTag,
1445                                                 irsp->ulpContext);
1446                         }
1447                         break;
1448                 }
1449
1450                 /*
1451                  * The response IOCB has been processed.  Update the ring
1452                  * pointer in SLIM.  If the port response put pointer has not
1453                  * been updated, sync the pgp->rspPutInx and fetch the new port
1454                  * response put pointer.
1455                  */
1456                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1457
1458                 if (pring->rspidx == portRspPut)
1459                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1460         }
1461
1462         if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1463                 pring->stats.iocb_rsp_full++;
1464                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1465                 writel(status, phba->CAregaddr);
1466                 readl(phba->CAregaddr);
1467         }
1468         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1469                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1470                 pring->stats.iocb_cmd_empty++;
1471
1472                 /* Force update of the local copy of cmdGetInx */
1473                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1474                 lpfc_sli_resume_iocb(phba, pring);
1475
1476                 if ((pring->lpfc_sli_cmd_available))
1477                         (pring->lpfc_sli_cmd_available) (phba, pring);
1478
1479         }
1480
1481         spin_unlock_irqrestore(&phba->hbalock, iflag);
1482         return rc;
1483 }
1484
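/*
 * Slow-path (ELS/CT) ring event handler.  Response entries are copied
 * into iocbq buffers and chained on iocb_continueq until an entry with
 * ulpLe set completes the sequence; the assembled response is then
 * handed to the solicited, unsolicited or abort completion path.
 */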
1485 int
1486 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1487                                 struct lpfc_sli_ring *pring, uint32_t mask)
1488 {
1489         struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1490                 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1491                 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1492         IOCB_t *entry;
1493         IOCB_t *irsp = NULL;
1494         struct lpfc_iocbq *rspiocbp = NULL;
1495         struct lpfc_iocbq *next_iocb;
1496         struct lpfc_iocbq *cmdiocbp;
1497         struct lpfc_iocbq *saveq;
1498         uint8_t iocb_cmd_type;
1499         lpfc_iocb_type type;
1500         uint32_t status, free_saveq;
1501         uint32_t portRspPut, portRspMax;
1502         int rc = 1;
1503         unsigned long iflag;
1504
1505         spin_lock_irqsave(&phba->hbalock, iflag);
1506         pring->stats.iocb_event++;
1507
1508         /*
1509          * The next available response entry should never exceed the maximum
1510          * entries.  If it does, treat it as an adapter hardware error.
1511          */
1512         portRspMax = pring->numRiocb;
1513         portRspPut = le32_to_cpu(pgp->rspPutInx);
1514         if (portRspPut >= portRspMax) {
1515                 /*
1516                  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1517                  * rsp ring <portRspMax>
1518                  */
1519                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1520                                 "0303 Ring %d handler: portRspPut %d "
1521                                 "is bigger than rsp ring %d\n",
1522                                 pring->ringno, portRspPut, portRspMax);
1523
1524                 phba->link_state = LPFC_HBA_ERROR;
1525                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1526
1527                 phba->work_hs = HS_FFER3;
1528                 lpfc_handle_eratt(phba);
1529
1530                 return 1;
1531         }
1532
1533         rmb();
1534         while (pring->rspidx != portRspPut) {
1535                 /*
1536                  * Build a completion list and call the appropriate handler.
1537                  * The process is to get the next available response iocb, get
1538                  * a free iocb from the list, copy the response data into the
1539                  * free iocb, insert it into the continuation list, and update
1540                  * the next response index in SLIM.  This process makes response
1541                  * iocbs in the ring available to DMA as fast as possible but
1542                  * pays a penalty for a copy operation.  Since the iocb is
1543                  * only 32 bytes, this penalty is considered small relative to
1544                  * the PCI reads for register values and a SLIM write.  When
1545                  * the ulpLe field is set, the entire command has been
1546                  * received.
1547                  */
1548                 entry = lpfc_resp_iocb(phba, pring);
1549
1550                 phba->last_completion_time = jiffies;
1551                 rspiocbp = __lpfc_sli_get_iocbq(phba);
1552                 if (rspiocbp == NULL) {
1553                         printk(KERN_ERR "%s: out of buffers! Failing "
1554                                "completion.\n", __func__);
1555                         break;
1556                 }
1557
1558                 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
1559                                       phba->iocb_rsp_size);
1560                 irsp = &rspiocbp->iocb;
1561
1562                 if (++pring->rspidx >= portRspMax)
1563                         pring->rspidx = 0;
1564
1565                 if (pring->ringno == LPFC_ELS_RING) {
1566                         lpfc_debugfs_slow_ring_trc(phba,
1567                         "IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1568                                 *(((uint32_t *) irsp) + 4),
1569                                 *(((uint32_t *) irsp) + 6),
1570                                 *(((uint32_t *) irsp) + 7));
1571                 }
1572
1573                 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1574
1575                 if (list_empty(&(pring->iocb_continueq))) {
1576                         list_add(&rspiocbp->list, &(pring->iocb_continueq));
1577                 } else {
1578                         list_add_tail(&rspiocbp->list,
1579                                       &(pring->iocb_continueq));
1580                 }
1581
1582                 pring->iocb_continueq_cnt++;
1583                 if (irsp->ulpLe) {
1584                         /*
1585                          * By default, the driver expects to free all resources
1586                          * associated with this iocb completion.
1587                          */
1588                         free_saveq = 1;
1589                         saveq = list_get_first(&pring->iocb_continueq,
1590                                                struct lpfc_iocbq, list);
1591                         irsp = &(saveq->iocb);
1592                         list_del_init(&pring->iocb_continueq);
1593                         pring->iocb_continueq_cnt = 0;
1594
1595                         pring->stats.iocb_rsp++;
1596
1597                         /*
1598                          * If resource errors reported from HBA, reduce
1599                          * queuedepths of the SCSI device.
1600                          */
1601                         if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1602                              (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1603                                 spin_unlock_irqrestore(&phba->hbalock, iflag);
1604                                 lpfc_adjust_queue_depth(phba);
1605                                 spin_lock_irqsave(&phba->hbalock, iflag);
1606                         }
1607
1608                         if (irsp->ulpStatus) {
1609                                 /* Rsp ring <ringno> error: IOCB */
1610                                 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1611                                                 "0328 Rsp Ring %d error: "
1612                                                 "IOCB Data: "
1613                                                 "x%x x%x x%x x%x "
1614                                                 "x%x x%x x%x x%x "
1615                                                 "x%x x%x x%x x%x "
1616                                                 "x%x x%x x%x x%x\n",
1617                                                 pring->ringno,
1618                                                 irsp->un.ulpWord[0],
1619                                                 irsp->un.ulpWord[1],
1620                                                 irsp->un.ulpWord[2],
1621                                                 irsp->un.ulpWord[3],
1622                                                 irsp->un.ulpWord[4],
1623                                                 irsp->un.ulpWord[5],
1624                                                 *(((uint32_t *) irsp) + 6),
1625                                                 *(((uint32_t *) irsp) + 7),
1626                                                 *(((uint32_t *) irsp) + 8),
1627                                                 *(((uint32_t *) irsp) + 9),
1628                                                 *(((uint32_t *) irsp) + 10),
1629                                                 *(((uint32_t *) irsp) + 11),
1630                                                 *(((uint32_t *) irsp) + 12),
1631                                                 *(((uint32_t *) irsp) + 13),
1632                                                 *(((uint32_t *) irsp) + 14),
1633                                                 *(((uint32_t *) irsp) + 15));
1634                         }
1635
1636                         /*
1637                          * Fetch the IOCB command type and call the correct
1638                          * completion routine.  Solicited and Unsolicited
1639                          * IOCBs on the ELS ring get freed back to the
1640                          * lpfc_iocb_list by the discovery kernel thread.
1641                          */
1642                         iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1643                         type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1644                         if (type == LPFC_SOL_IOCB) {
1645                                 spin_unlock_irqrestore(&phba->hbalock,
1646                                                        iflag);
1647                                 rc = lpfc_sli_process_sol_iocb(phba, pring,
1648                                                                saveq);
1649                                 spin_lock_irqsave(&phba->hbalock, iflag);
1650                         } else if (type == LPFC_UNSOL_IOCB) {
1651                                 spin_unlock_irqrestore(&phba->hbalock,
1652                                                        iflag);
1653                                 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1654                                                                  saveq);
1655                                 spin_lock_irqsave(&phba->hbalock, iflag);
1656                         } else if (type == LPFC_ABORT_IOCB) {
1657                                 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1658                                     ((cmdiocbp =
1659                                       lpfc_sli_iocbq_lookup(phba, pring,
1660                                                             saveq)))) {
1661                                         /* Call the specified completion
1662                                            routine */
1663                                         if (cmdiocbp->iocb_cmpl) {
1664                                                 spin_unlock_irqrestore(
1665                                                        &phba->hbalock,
1666                                                        iflag);
1667                                                 (cmdiocbp->iocb_cmpl) (phba,
1668                                                              cmdiocbp, saveq);
1669                                                 spin_lock_irqsave(
1670                                                           &phba->hbalock,
1671                                                           iflag);
1672                                         } else
1673                                                 __lpfc_sli_release_iocbq(phba,
1674                                                                       cmdiocbp);
1675                                 }
1676                         } else if (type == LPFC_UNKNOWN_IOCB) {
1677                                 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1678
1679                                         char adaptermsg[LPFC_MAX_ADPTMSG];
1680
1681                                         memset(adaptermsg, 0,
1682                                                LPFC_MAX_ADPTMSG);
1683                                         memcpy(&adaptermsg[0], (uint8_t *) irsp,
1684                                                MAX_MSG_DATA);
1685                                         dev_warn(&((phba->pcidev)->dev),
1686                                                  "lpfc%d: %s\n",
1687                                                  phba->brd_no, adaptermsg);
1688                                 } else {
1689                                         /* Unknown IOCB command */
1690                                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1691                                                         "0335 Unknown IOCB "
1692                                                         "command Data: x%x "
1693                                                         "x%x x%x x%x\n",
1694                                                         irsp->ulpCommand,
1695                                                         irsp->ulpStatus,
1696                                                         irsp->ulpIoTag,
1697                                                         irsp->ulpContext);
1698                                 }
1699                         }
1700
1701                         if (free_saveq) {
1702                                 list_for_each_entry_safe(rspiocbp, next_iocb,
1703                                                          &saveq->list, list) {
1704                                         list_del(&rspiocbp->list);
1705                                         __lpfc_sli_release_iocbq(phba,
1706                                                                  rspiocbp);
1707                                 }
1708                                 __lpfc_sli_release_iocbq(phba, saveq);
1709                         }
1710                         rspiocbp = NULL;
1711                 }
1712
1713                 /*
1714                  * If the port response put pointer has not been updated, sync
1715                  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
1716                  * response put pointer.
1717                  */
1718                 if (pring->rspidx == portRspPut) {
1719                         portRspPut = le32_to_cpu(pgp->rspPutInx);
1720                 }
1721         } /* while (pring->rspidx != portRspPut) */
1722
1723         if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1724                 /* At least one response entry has been freed */
1725                 pring->stats.iocb_rsp_full++;
1726                 /* SET RxRE_RSP in Chip Att register */
1727                 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1728                 writel(status, phba->CAregaddr);
1729                 readl(phba->CAregaddr); /* flush */
1730         }
1731         if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1732                 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1733                 pring->stats.iocb_cmd_empty++;
1734
1735                 /* Force update of the local copy of cmdGetInx */
1736                 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1737                 lpfc_sli_resume_iocb(phba, pring);
1738
1739                 if ((pring->lpfc_sli_cmd_available))
1740                         (pring->lpfc_sli_cmd_available) (phba, pring);
1741
1742         }
1743
1744         spin_unlock_irqrestore(&phba->hbalock, iflag);
1745         return rc;
1746 }
1747
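/*
 * Fail all outstanding I/O on a ring.  Everything on the txq is
 * completed with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED, and an abort
 * is issued for every command still on the txcmplq.  For the ELS ring,
 * pending fabric IOCBs are aborted first.
 */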
1748 void
1749 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1750 {
1751         LIST_HEAD(completions);
1752         struct lpfc_iocbq *iocb, *next_iocb;
1753         IOCB_t *cmd = NULL;
1754
1755         if (pring->ringno == LPFC_ELS_RING) {
1756                 lpfc_fabric_abort_hba(phba);
1757         }
1758
1759         /* Error everything on txq and txcmplq
1760          * First do the txq.
1761          */
1762         spin_lock_irq(&phba->hbalock);
1763         list_splice_init(&pring->txq, &completions);
1764         pring->txq_cnt = 0;
1765
1766         /* Next issue ABTS for everything on the txcmplq */
1767         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1768                 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1769
1770         spin_unlock_irq(&phba->hbalock);
1771
1772         while (!list_empty(&completions)) {
1773                 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1774                 cmd = &iocb->iocb;
1775                 list_del_init(&iocb->list);
1776
1777                 if (!iocb->iocb_cmpl)
1778                         lpfc_sli_release_iocbq(phba, iocb);
1779                 else {
1780                         cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1781                         cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1782                         (iocb->iocb_cmpl) (phba, iocb, iocb);
1783                 }
1784         }
1785 }
1786
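/*
 * Poll the Host Status register until the ready bits in <mask> are
 * set, restarting the board once if it is slow to come ready.
 * Returns 0 when the HBA is ready, 1 on a hardware error or timeout.
 */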
1787 int
1788 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1789 {
1790         uint32_t status;
1791         int i = 0;
1792         int retval = 0;
1793
1794         /* Read the HBA Host Status Register */
1795         status = readl(phba->HSregaddr);
1796
1797         /*
1798          * Check status register every 100ms for 5 retries, then every
1799          * 500ms for 5, then every 2.5 sec for 5, then reset board and
1800          * every 2.5 sec for 4.
1801          * Break out of the loop if errors occurred during init.
1802          */
1803         while (((status & mask) != mask) &&
1804                !(status & HS_FFERM) &&
1805                i++ < 20) {
1806
1807                 if (i <= 5)
1808                         msleep(10);
1809                 else if (i <= 10)
1810                         msleep(500);
1811                 else
1812                         msleep(2500);
1813
1814                 if (i == 15) {
1815                         /* Do post */
1816                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1817                         lpfc_sli_brdrestart(phba);
1818                 }
1819                 /* Read the HBA Host Status Register */
1820                 status = readl(phba->HSregaddr);
1821         }
1822
1823         /* Check to see if any errors occurred during init */
1824         if ((status & HS_FFERM) || (i >= 20)) {
1825                 phba->link_state = LPFC_HBA_ERROR;
1826                 retval = 1;
1827         }
1828
1829         return retval;
1830 }
1831
1832 #define BARRIER_TEST_PATTERN (0xdeadbeef)
1833
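/*
 * Quiesce the HBA before a reset on Helios/Thor class adapters.  Error
 * attention is masked, a KILL_BOARD mailbox word is written directly
 * to SLIM, and the routine waits for the chip to acknowledge (or to
 * raise an error attention) before restoring the HC register.
 */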
1834 void lpfc_reset_barrier(struct lpfc_hba *phba)
1835 {
1836         uint32_t __iomem *resp_buf;
1837         uint32_t __iomem *mbox_buf;
1838         volatile uint32_t mbox;
1839         uint32_t hc_copy;
1840         int  i;
1841         uint8_t hdrtype;
1842
1843         pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1844         if (hdrtype != 0x80 ||
1845             (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1846              FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1847                 return;
1848
1849         /*
1850          * Tell the other part of the chip to suspend temporarily all
1851          * its DMA activity.
1852          */
1853         resp_buf = phba->MBslimaddr;
1854
1855         /* Disable the error attention */
1856         hc_copy = readl(phba->HCregaddr);
1857         writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1858         readl(phba->HCregaddr); /* flush */
1859         phba->link_flag |= LS_IGNORE_ERATT;
1860
1861         if (readl(phba->HAregaddr) & HA_ERATT) {
1862                 /* Clear Chip error bit */
1863                 writel(HA_ERATT, phba->HAregaddr);
1864                 phba->pport->stopped = 1;
1865         }
1866
1867         mbox = 0;
1868         ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1869         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1870
1871         writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1872         mbox_buf = phba->MBslimaddr;
1873         writel(mbox, mbox_buf);
1874
1875         for (i = 0;
1876              readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1877                 mdelay(1);
1878
1879         if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1880                 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1881                     phba->pport->stopped)
1882                         goto restore_hc;
1883                 else
1884                         goto clear_errat;
1885         }
1886
1887         ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1888         for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
1889                 mdelay(1);
1890
1891 clear_errat:
1892
1893         while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1894                 mdelay(1);
1895
1896         if (readl(phba->HAregaddr) & HA_ERATT) {
1897                 writel(HA_ERATT, phba->HAregaddr);
1898                 phba->pport->stopped = 1;
1899         }
1900
1901 restore_hc:
1902         phba->link_flag &= ~LS_IGNORE_ERATT;
1903         writel(hc_copy, phba->HCregaddr);
1904         readl(phba->HCregaddr); /* flush */
1905 }
1906
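/*
 * Kill the HBA with a KILL_BOARD mailbox command issued while error
 * attention is masked, then wait up to 3 seconds for the resulting
 * error attention.  Returns 0 if the error attention was seen and 1
 * otherwise; the HBA is left in LPFC_HBA_ERROR state either way.
 */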
1907 int
1908 lpfc_sli_brdkill(struct lpfc_hba *phba)
1909 {
1910         struct lpfc_sli *psli;
1911         LPFC_MBOXQ_t *pmb;
1912         uint32_t status;
1913         uint32_t ha_copy;
1914         int retval;
1915         int i = 0;
1916
1917         psli = &phba->sli;
1918
1919         /* Kill HBA */
1920         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1921                         "0329 Kill HBA Data: x%x x%x\n",
1922                         phba->pport->port_state, psli->sli_flag);
1923
1924         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1925         if (!pmb)
1926                 return 1;
1927
1928         /* Disable the error attention */
1929         spin_lock_irq(&phba->hbalock);
1930         status = readl(phba->HCregaddr);
1931         status &= ~HC_ERINT_ENA;
1932         writel(status, phba->HCregaddr);
1933         readl(phba->HCregaddr); /* flush */
1934         phba->link_flag |= LS_IGNORE_ERATT;
1935         spin_unlock_irq(&phba->hbalock);
1936
1937         lpfc_kill_board(phba, pmb);
1938         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1939         retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1940
1941         if (retval != MBX_SUCCESS) {
1942                 if (retval != MBX_BUSY)
1943                         mempool_free(pmb, phba->mbox_mem_pool);
1944                 spin_lock_irq(&phba->hbalock);
1945                 phba->link_flag &= ~LS_IGNORE_ERATT;
1946                 spin_unlock_irq(&phba->hbalock);
1947                 return 1;
1948         }
1949
1950         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1951
1952         mempool_free(pmb, phba->mbox_mem_pool);
1953
1954         /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1955          * attention every 100ms for 3 seconds. If we don't get ERATT after
1956          * 3 seconds we still set HBA_ERROR state because the status of the
1957          * board is now undefined.
1958          */
1959         ha_copy = readl(phba->HAregaddr);
1960
1961         while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1962                 mdelay(100);
1963                 ha_copy = readl(phba->HAregaddr);
1964         }
1965
1966         del_timer_sync(&psli->mbox_tmo);
1967         if (ha_copy & HA_ERATT) {
1968                 writel(HA_ERATT, phba->HAregaddr);
1969                 phba->pport->stopped = 1;
1970         }
1971         spin_lock_irq(&phba->hbalock);
1972         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1973         phba->link_flag &= ~LS_IGNORE_ERATT;
1974         spin_unlock_irq(&phba->hbalock);
1975
1976         psli->mbox_active = NULL;
1977         lpfc_hba_down_post(phba);
1978         phba->link_state = LPFC_HBA_ERROR;
1979
1980         return ha_copy & HA_ERATT ? 0 : 1;
1981 }
1982
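/*
 * Reset the HBA.  Clears the event tag and port DIDs, disables PCI
 * parity/SERR reporting around the reset, toggles INITFF in the Host
 * Control register and reinitializes the per-ring SLI indices, leaving
 * the link state at LPFC_WARM_START.
 */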
1983 int
1984 lpfc_sli_brdreset(struct lpfc_hba *phba)
1985 {
1986         struct lpfc_sli *psli;
1987         struct lpfc_sli_ring *pring;
1988         uint16_t cfg_value;
1989         int i;
1990
1991         psli = &phba->sli;
1992
1993         /* Reset HBA */
1994         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1995                         "0325 Reset HBA Data: x%x x%x\n",
1996                         phba->pport->port_state, psli->sli_flag);
1997
1998         /* perform board reset */
1999         phba->fc_eventTag = 0;
2000         phba->pport->fc_myDID = 0;
2001         phba->pport->fc_prevDID = 0;
2002
2003         /* Turn off parity checking and serr during the physical reset */
2004         pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
2005         pci_write_config_word(phba->pcidev, PCI_COMMAND,
2006                               (cfg_value &
2007                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2008
2009         psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
2010         /* Now toggle INITFF bit in the Host Control Register */
2011         writel(HC_INITFF, phba->HCregaddr);
2012         mdelay(1);
2013         readl(phba->HCregaddr); /* flush */
2014         writel(0, phba->HCregaddr);
2015         readl(phba->HCregaddr); /* flush */
2016
2017         /* Restore PCI cmd register */
2018         pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
2019
2020         /* Initialize relevant SLI info */
2021         for (i = 0; i < psli->num_rings; i++) {
2022                 pring = &psli->ring[i];
2023                 pring->flag = 0;
2024                 pring->rspidx = 0;
2025                 pring->next_cmdidx  = 0;
2026                 pring->local_getidx = 0;
2027                 pring->cmdidx = 0;
2028                 pring->missbufcnt = 0;
2029         }
2030
2031         phba->link_state = LPFC_WARM_START;
2032         return 0;
2033 }
2034
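/*
 * Restart the HBA.  Writes an MBX_RESTART mailbox word to SLIM,
 * skipping POST once fc_ffinit has completed, runs the reset barrier
 * and board reset, then waits for the adapter to settle before
 * cleaning up outstanding work with lpfc_hba_down_post().
 */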
2035 int
2036 lpfc_sli_brdrestart(struct lpfc_hba *phba)
2037 {
2038         MAILBOX_t *mb;
2039         struct lpfc_sli *psli;
2040         uint16_t skip_post;
2041         volatile uint32_t word0;
2042         void __iomem *to_slim;
2043
2044         spin_lock_irq(&phba->hbalock);
2045
2046         psli = &phba->sli;
2047
2048         /* Restart HBA */
2049         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2050                         "0337 Restart HBA Data: x%x x%x\n",
2051                         phba->pport->port_state, psli->sli_flag);
2052
2053         word0 = 0;
2054         mb = (MAILBOX_t *) &word0;
2055         mb->mbxCommand = MBX_RESTART;
2056         mb->mbxHc = 1;
2057
2058         lpfc_reset_barrier(phba);
2059
2060         to_slim = phba->MBslimaddr;
2061         writel(*(uint32_t *) mb, to_slim);
2062         readl(to_slim); /* flush */
2063
2064         /* Only skip post after fc_ffinit is completed */
2065         if (phba->pport->port_state) {
2066                 skip_post = 1;
2067                 word0 = 1;      /* This is really setting up word1 */
2068         } else {
2069                 skip_post = 0;
2070                 word0 = 0;      /* This is really setting up word1 */
2071         }
2072         to_slim = phba->MBslimaddr + sizeof (uint32_t);
2073         writel(*(uint32_t *) mb, to_slim);
2074         readl(to_slim); /* flush */
2075
2076         lpfc_sli_brdreset(phba);
2077         phba->pport->stopped = 0;
2078         phba->link_state = LPFC_INIT_START;
2079
2080         spin_unlock_irq(&phba->hbalock);
2081
2082         memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2083         psli->stats_start = get_seconds();
2084
2085         if (skip_post)
2086                 mdelay(100);
2087         else
2088                 mdelay(2000);
2089
2090         lpfc_hba_down_post(phba);
2091
2092         return 0;
2093 }
2094
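/*
 * Wait for the chipset to finish initialization by polling the Host
 * Status register for HS_FFRDY and HS_MBRDY, restarting the board once
 * if it is slow to come ready.  Returns 0 on success, -ETIMEDOUT or
 * -EIO on failure.
 */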
2095 static int
2096 lpfc_sli_chipset_init(struct lpfc_hba *phba)
2097 {
2098         uint32_t status, i = 0;
2099
2100         /* Read the HBA Host Status Register */
2101         status = readl(phba->HSregaddr);
2102
2103         /* Check status register to see what current state is */
2104         i = 0;
2105         while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
2106
2107                 /* Check every 100ms for 5 retries, then every 500ms for 5, then
2108                  * every 2.5 sec for 5, then reset board and every 2.5 sec for
2109                  * 4.
2110                  */
2111                 if (i++ >= 20) {
2112                         /* Adapter failed to init, timeout, status reg
2113                            <status> */
2114                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2115                                         "0436 Adapter failed to init, "
2116                                         "timeout, status reg x%x\n", status);
2117                         phba->link_state = LPFC_HBA_ERROR;
2118                         return -ETIMEDOUT;
2119                 }
2120
2121                 /* Check to see if any errors occurred during init */
2122                 if (status & HS_FFERM) {
2123                         /* ERROR: During chipset initialization */
2124                         /* Adapter failed to init, chipset, status reg
2125                            <status> */
2126                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2127                                         "0437 Adapter failed to init, "
2128                                         "chipset, status reg x%x\n", status);
2129                         phba->link_state = LPFC_HBA_ERROR;
2130                         return -EIO;
2131                 }
2132
2133                 if (i <= 5) {
2134                         msleep(10);
2135                 } else if (i <= 10) {
2136                         msleep(500);
2137                 } else {
2138                         msleep(2500);
2139                 }
2140
2141                 if (i == 15) {
2142                         /* Do post */
2143                         phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2144                         lpfc_sli_brdrestart(phba);
2145                 }
2146                 /* Read the HBA Host Status Register */
2147                 status = readl(phba->HSregaddr);
2148         }
2149
2150         /* Check to see if any errors occurred during init */
2151         if (status & HS_FFERM) {
2152                 /* ERROR: During chipset initialization */
2153                 /* Adapter failed to init, chipset, status reg <status> */
2154                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2155                                 "0438 Adapter failed to init, chipset, "
2156                                 "status reg x%x\n", status);
2157                 phba->link_state = LPFC_HBA_ERROR;
2158                 return -EIO;
2159         }
2160
2161         /* Clear all interrupt enable conditions */
2162         writel(0, phba->HCregaddr);
2163         readl(phba->HCregaddr); /* flush */
2164
2165         /* setup host attn register */
2166         writel(0xffffffff, phba->HAregaddr);
2167         readl(phba->HAregaddr); /* flush */
2168         return 0;
2169 }
2170
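/*
 * HBQ layout helpers: number of HBQs described by lpfc_hbq_defs, the
 * total entry count across all of them, and the number of bytes needed
 * to hold every HBQ entry.
 */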
2171 int
2172 lpfc_sli_hbq_count(void)
2173 {
2174         return ARRAY_SIZE(lpfc_hbq_defs);
2175 }
2176
2177 static int
2178 lpfc_sli_hbq_entry_count(void)
2179 {
2180         int  hbq_count = lpfc_sli_hbq_count();
2181         int  count = 0;
2182         int  i;
2183
2184         for (i = 0; i < hbq_count; ++i)
2185                 count += lpfc_hbq_defs[i]->entry_count;
2186         return count;
2187 }
2188
2189 int
2190 lpfc_sli_hbq_size(void)
2191 {
2192         return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2193 }
2194
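/*
 * Configure each HBQ described in lpfc_hbq_defs with a mailbox command
 * issued in polling mode, then populate the queues with receive
 * buffers.  Returns 0 on success or an error code on failure.
 */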
2195 static int
2196 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2197 {
2198         int  hbq_count = lpfc_sli_hbq_count();
2199         LPFC_MBOXQ_t *pmb;
2200         MAILBOX_t *pmbox;
2201         uint32_t hbqno;
2202         uint32_t hbq_entry_index;
2203
2204         /* Get a Mailbox buffer to setup mailbox
2205          * commands for HBA initialization
2206          */
2207         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2208
2209         if (!pmb)
2210                 return -ENOMEM;
2211
2212         pmbox = &pmb->mb;
2213
2214         /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2215         phba->link_state = LPFC_INIT_MBX_CMDS;
2216
2217         hbq_entry_index = 0;
2218         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2219                 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2220                 phba->hbqs[hbqno].hbqPutIdx      = 0;
2221                 phba->hbqs[hbqno].local_hbqGetIdx   = 0;
2222                 phba->hbqs[hbqno].entry_count =
2223                         lpfc_hbq_defs[hbqno]->entry_count;
2224                 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
2225                         hbq_entry_index, pmb);
2226                 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2227
2228                 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2229                         /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2230                            mbxStatus <status>, ring <num> */
2231
2232                         lpfc_printf_log(phba, KERN_ERR,
2233                                         LOG_SLI | LOG_VPORT,
2234                                         "1805 Adapter failed to init. "
2235                                         "Data: x%x x%x x%x\n",
2236                                         pmbox->mbxCommand,
2237                                         pmbox->mbxStatus, hbqno);
2238
2239                         phba->link_state = LPFC_HBA_ERROR;
2240                         mempool_free(pmb, phba->mbox_mem_pool);
2241                         return -ENXIO;
2242                 }
2243         }
2244         phba->hbq_count = hbq_count;
2245
2246         mempool_free(pmb, phba->mbox_mem_pool);
2247
2248         /* Initially populate or replenish the HBQs */
2249         for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2250                 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2251                         return -ENOMEM;
2252         }
2253         return 0;
2254 }
2255
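/*
 * Bring the port up in the requested SLI mode.  The board is restarted
 * and CONFIG_PORT issued, with up to two attempts.  On success the
 * driver's max_vpi is taken from the CONFIG_PORT response; SLI-3 mode
 * is rejected if the adapter did not set cMA in the response.
 */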
2256 static int
2257 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2258 {
2259         LPFC_MBOXQ_t *pmb;
2260         uint32_t resetcount = 0, rc = 0, done = 0;
2261
2262         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2263         if (!pmb) {
2264                 phba->link_state = LPFC_HBA_ERROR;
2265                 return -ENOMEM;
2266         }
2267
2268         phba->sli_rev = sli_mode;
2269         while (resetcount < 2 && !done) {
2270                 spin_lock_irq(&phba->hbalock);
2271                 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2272                 spin_unlock_irq(&phba->hbalock);
2273                 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2274                 lpfc_sli_brdrestart(phba);
2275                 msleep(2500);
2276                 rc = lpfc_sli_chipset_init(phba);
2277                 if (rc)
2278                         break;
2279
2280                 spin_lock_irq(&phba->hbalock);
2281                 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2282                 spin_unlock_irq(&phba->hbalock);
2283                 resetcount++;
2284
2285                 /* Call pre CONFIG_PORT mailbox command initialization.  A
2286                  * value of 0 means the call was successful.  Any other
2287                  * nonzero value is a failure, but if ERESTART is returned,
2288                  * the driver may reset the HBA and try again.
2289                  */
2290                 rc = lpfc_config_port_prep(phba);
2291                 if (rc == -ERESTART) {
2292                         phba->link_state = LPFC_LINK_UNKNOWN;
2293                         continue;
2294                 } else if (rc) {
2295                         break;
2296                 }
2297
2298                 phba->link_state = LPFC_INIT_MBX_CMDS;
2299                 lpfc_config_port(phba, pmb);
2300                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2301                 if (rc != MBX_SUCCESS) {
2302                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2303                                 "0442 Adapter failed to init, mbxCmd x%x "
2304                                 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2305                                 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
2306                         spin_lock_irq(&phba->hbalock);
2307                         phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2308                         spin_unlock_irq(&phba->hbalock);
2309                         rc = -ENXIO;
2310                 } else {
2311                         done = 1;
2312                         phba->max_vpi = (phba->max_vpi &&
2313                                          pmb->mb.un.varCfgPort.gmv) != 0
2314                                 ? pmb->mb.un.varCfgPort.max_vpi
2315                                 : 0;
2316                 }
2317         }
2318
2319         if (!done) {
2320                 rc = -EINVAL;
2321                 goto do_prep_failed;
2322         }
2323
2324         if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2325                 (!pmb->mb.un.varCfgPort.cMA)) {
2326                 rc = -ENXIO;
2327                 goto do_prep_failed;
2328         }
2329         return rc;
2330
2331 do_prep_failed:
2332         mempool_free(pmb, phba->mbox_mem_pool);
2333         return rc;
2334 }
2335
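/*
 * Main SLI bring-up path.  Selects the SLI mode (honoring the
 * lpfc_sli_mode setting and NPIV configuration), configures the port,
 * maps the rings, sets up HBQs when SLI-3 is active and runs the
 * post-CONFIG_PORT initialization.  Any failure leaves the HBA in
 * LPFC_HBA_ERROR state.
 */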
2336 int
2337 lpfc_sli_hba_setup(struct lpfc_hba *phba)
2338 {
2339         uint32_t rc;
2340         int  mode = 3;
2341
2342         switch (lpfc_sli_mode) {
2343         case 2:
2344                 if (phba->cfg_enable_npiv) {
2345                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2346                                 "1824 NPIV enabled: Override lpfc_sli_mode "
2347                                 "parameter (%d) to auto (0).\n",
2348                                 lpfc_sli_mode);
2349                         break;
2350                 }
2351                 mode = 2;
2352                 break;
2353         case 0:
2354         case 3:
2355                 break;
2356         default:
2357                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2358                                 "1819 Unrecognized lpfc_sli_mode "
2359                                 "parameter: %d.\n", lpfc_sli_mode);
2360
2361                 break;
2362         }
2363
2364         rc = lpfc_do_config_port(phba, mode);
2365         if (rc && lpfc_sli_mode == 3)
2366                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2367                                 "1820 Unable to select SLI-3.  "
2368                                 "Not supported by adapter.\n");
2369         if (rc && mode != 2)
2370                 rc = lpfc_do_config_port(phba, 2);
2371         if (rc)
2372                 goto lpfc_sli_hba_setup_error;
2373
2374         if (phba->sli_rev == 3) {
2375                 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2376                 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2377                 phba->sli3_options |= LPFC_SLI3_ENABLED;
2378                 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2379
2380         } else {
2381                 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2382                 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2383                 phba->sli3_options = 0;
2384         }
2385
2386         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2387                         "0444 Firmware in SLI %x mode. Max_vpi %d\n",
2388                         phba->sli_rev, phba->max_vpi);
2389         rc = lpfc_sli_ring_map(phba);
2390
2391         if (rc)
2392                 goto lpfc_sli_hba_setup_error;
2393
2394                                 /* Init HBQs */
2395         /* Init HBQs */
2396         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2397                 rc = lpfc_sli_hbq_setup(phba);
2398                 if (rc)
2399                         goto lpfc_sli_hba_setup_error;
2400         }
2401
2402         phba->sli.sli_flag |= LPFC_PROCESS_LA;
2403
2404         rc = lpfc_config_port_post(phba);
2405         if (rc)
2406                 goto lpfc_sli_hba_setup_error;
2407
2408         return rc;
2409
2410 lpfc_sli_hba_setup_error:
2411         phba->link_state = LPFC_HBA_ERROR;
2412         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2413                         "0445 Firmware initialization failed\n");
2414         return rc;
2415 }
2416
2417 /*! lpfc_mbox_timeout
2418  *
2419  * \pre
2420  * \post
2421  * \param ptr Pointer to the timer argument: the per struct lpfc_hba
2422  *            structure, cast to an unsigned long.
2423  * \return
2424  *   void
2425  *
2426  * \b Description:
2427  *
2428  * This routine handles mailbox timeout events at timer interrupt context.
2429  */
2430 void
2431 lpfc_mbox_timeout(unsigned long ptr)
2432 {
2433         struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
2434         unsigned long iflag;
2435         uint32_t tmo_posted;
2436
2437         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2438         tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2439         if (!tmo_posted)
2440                 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2441         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2442
2443         if (!tmo_posted) {
2444                 spin_lock_irqsave(&phba->hbalock, iflag);
2445                 if (phba->work_wait)
2446                         lpfc_worker_wake_up(phba);
2447                 spin_unlock_irqrestore(&phba->hbalock, iflag);
2448         }
2449 }
2450
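/*
 * Worker-thread handler for a mailbox command timeout.  Logs the stuck
 * command, marks the link state unknown, aborts the FCP ring and takes
 * the HBA through an offline/restart/online cycle to recover.
 */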
2451 void
2452 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2453 {
2454         LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2455         MAILBOX_t *mb = &pmbox->mb;
2456         struct lpfc_sli *psli = &phba->sli;
2457         struct lpfc_sli_ring *pring;
2458
2459         if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2460                 return;
2461         }
2462
2463         /* Mbox cmd <mbxCommand> timeout */
2464         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2465                         "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2466                         mb->mbxCommand,
2467                         phba->pport->port_state,
2468                         phba->sli.sli_flag,
2469                         phba->sli.mbox_active);
2470
2471         /* Setting state unknown so lpfc_sli_abort_iocb_ring
2472          * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2473          * it to fail all outstanding SCSI IO.
2474          */
2475         spin_lock_irq(&phba->pport->work_port_lock);
2476         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2477         spin_unlock_irq(&phba->pport->work_port_lock);
2478         spin_lock_irq(&phba->hbalock);
2479         phba->link_state = LPFC_LINK_UNKNOWN;
2480         phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2481         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2482         spin_unlock_irq(&phba->hbalock);
2483
2484         pring = &psli->ring[psli->fcp_ring];
2485         lpfc_sli_abort_iocb_ring(phba, pring);
2486
2487         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2488                         "0316 Resetting board due to mailbox timeout\n");
2489         /*
2490          * lpfc_offline calls lpfc_sli_hba_down which will clean up
2491          * any outstanding mailbox commands.
2492          */
2493         lpfc_offline_prep(phba);
2494         lpfc_offline(phba);
2495         lpfc_sli_brdrestart(phba);
2496         if (lpfc_online(phba) == 0)             /* Initialize the HBA */
2497                 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2498         lpfc_unblock_mgmt_io(phba);
2499         return;
2500 }
2501
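/*
 * Issue a mailbox command to the HBA.  With MBX_NOWAIT the command is
 * started immediately (or queued with MBX_BUSY if another mailbox is
 * active) and completes through its mbox_cmpl callback; with MBX_POLL
 * the command is polled to completion before returning.
 */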
2502 int
2503 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2504 {
2505         MAILBOX_t *mb;
2506         struct lpfc_sli *psli = &phba->sli;
2507         uint32_t status, evtctr;
2508         uint32_t ha_copy;
2509         int i;
2510         unsigned long drvr_flag = 0;
2511         volatile uint32_t word0, ldata;
2512         void __iomem *to_slim;
2513
2514         if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2515                 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2516                 if (!pmbox->vport) {
2517                         lpfc_printf_log(phba, KERN_ERR,
2518                                         LOG_MBOX | LOG_VPORT,
2519                                         "1806 Mbox x%x failed. No vport\n",
2520                                         pmbox->mb.mbxCommand);
2521                         dump_stack();
2522                         return MBXERR_ERROR;
2523                 }
2524         }
2525
2526
2527         /* If the PCI channel is in offline state, do not post mbox. */
2528         if (unlikely(pci_channel_offline(phba->pcidev)))
2529                 return MBX_NOT_FINISHED;
2530
2531         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2532         psli = &phba->sli;
2533
2534
2535         mb = &pmbox->mb;
2536         status = MBX_SUCCESS;
2537
2538         if (phba->link_state == LPFC_HBA_ERROR) {
2539                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2540
2541                 /* Mbox command <mbxCommand> cannot issue */
2542                 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2543                 return MBX_NOT_FINISHED;
2544         }
2545
2546         if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2547             !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2548                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2549                 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2550                 return MBX_NOT_FINISHED;
2551         }
2552
2553         if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2554                 /* Polling for a mbox command when another one is already active
2555                  * is not allowed in SLI. Also, the driver must have established
2556                  * SLI2 mode to queue and process multiple mbox commands.
2557                  */
2558
2559                 if (flag & MBX_POLL) {
2560                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2561
2562                         /* Mbox command <mbxCommand> cannot issue */
2563                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2564                         return MBX_NOT_FINISHED;
2565                 }
2566
2567                 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2568                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2569                         /* Mbox command <mbxCommand> cannot issue */
2570                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2571                         return MBX_NOT_FINISHED;
2572                 }
2573
2574                 /* Handle STOP IOCB processing flag. This is only meaningful
2575                  * if we are not polling for mbox completion.
2576                  */
2577                 if (flag & MBX_STOP_IOCB) {
2578                         flag &= ~MBX_STOP_IOCB;
2579                         /* Now flag each ring */
2580                         for (i = 0; i < psli->num_rings; i++) {
2581                                 /* If the ring is active, flag it */
2582                                 if (psli->ring[i].cmdringaddr) {
2583                                         psli->ring[i].flag |=
2584                                             LPFC_STOP_IOCB_MBX;
2585                                 }
2586                         }
2587                 }
2588
2589                 /* Another mailbox command is still being processed, queue this
2590                  * command to be processed later.
2591                  */
2592                 lpfc_mbox_put(phba, pmbox);
2593
2594                 /* Mbox cmd issue - BUSY */
2595                 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2596                                 "(%d):0308 Mbox cmd issue - BUSY Data: "
2597                                 "x%x x%x x%x x%x\n",
2598                                 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2599                                 mb->mbxCommand, phba->pport->port_state,
2600                                 psli->sli_flag, flag);
2601
2602                 psli->slistat.mbox_busy++;
2603                 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2604
2605                 if (pmbox->vport) {
2606                         lpfc_debugfs_disc_trc(pmbox->vport,
2607                                 LPFC_DISC_TRC_MBOX_VPORT,
2608                                 "MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
2609                                 (uint32_t)mb->mbxCommand,
2610                                 mb->un.varWords[0], mb->un.varWords[1]);
2611                 }
2612                 else {
2613                         lpfc_debugfs_disc_trc(phba->pport,
2614                                 LPFC_DISC_TRC_MBOX,
2615                                 "MBOX Bsy:        cmd:x%x mb:x%x x%x",
2616                                 (uint32_t)mb->mbxCommand,
2617                                 mb->un.varWords[0], mb->un.varWords[1]);
2618                 }
2619
2620                 return MBX_BUSY;
2621         }
2622
2623         /* Handle STOP IOCB processing flag. This is only meaningful
2624          * if we are not polling for mbox completion.
2625          */
2626         if (flag & MBX_STOP_IOCB) {
2627                 flag &= ~MBX_STOP_IOCB;
2628                 if (flag == MBX_NOWAIT) {
2629                         /* Now flag each ring */
2630                         for (i = 0; i < psli->num_rings; i++) {
2631                                 /* If the ring is active, flag it */
2632                                 if (psli->ring[i].cmdringaddr) {
2633                                         psli->ring[i].flag |=
2634                                             LPFC_STOP_IOCB_MBX;
2635                                 }
2636                         }
2637                 }
2638         }
2639
2640         psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2641
2642         /* If we are not polling, we MUST be in SLI2 mode */
2643         if (flag != MBX_POLL) {
2644                 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2645                     (mb->mbxCommand != MBX_KILL_BOARD)) {
2646                         psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2647                         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2648                         /* Mbox command <mbxCommand> cannot issue */
2649                         LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2650                         return MBX_NOT_FINISHED;
2651                 }
2652                 /* Arm the timeout for the active mbox command */
2653                 mod_timer(&psli->mbox_tmo, (jiffies +
2654                                (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2655         }
2656
2657         /* Mailbox cmd <cmd> issue */
2658         lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2659                         "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2660                         "x%x\n",
2661                         pmbox->vport ? pmbox->vport->vpi : 0,
2662                         mb->mbxCommand, phba->pport->port_state,
2663                         psli->sli_flag, flag);
2664
2665         if (mb->mbxCommand != MBX_HEARTBEAT) {
2666                 if (pmbox->vport) {
2667                         lpfc_debugfs_disc_trc(pmbox->vport,
2668                                 LPFC_DISC_TRC_MBOX_VPORT,
2669                                 "MBOX Send vport: cmd:x%x mb:x%x x%x",
2670                                 (uint32_t)mb->mbxCommand,
2671                                 mb->un.varWords[0], mb->un.varWords[1]);
2672                 }
2673                 else {
2674                         lpfc_debugfs_disc_trc(phba->pport,
2675                                 LPFC_DISC_TRC_MBOX,
2676                                 "MBOX Send:       cmd:x%x mb:x%x x%x",
2677                                 (uint32_t)mb->mbxCommand,
2678                                 mb->un.varWords[0], mb->un.varWords[1]);
2679                 }
2680         }
2681
2682         psli->slistat.mbox_cmd++;
2683         evtctr = psli->slistat.mbox_event;
2684
2685         /* next set own bit for the adapter and copy over command word */
2686         mb->mbxOwner = OWN_CHIP;
2687
2688         if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2689                 /* First copy command data to host SLIM area */
2690                 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
2691         } else {
2692                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2693                         /* copy command data into host mbox for cmpl */
2694                         lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2695                                               MAILBOX_CMD_SIZE);
2696                 }
2697
2698                 /* First copy mbox command data to HBA SLIM, skip past first
2699                    word */
2700                 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2701                 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2702                             MAILBOX_CMD_SIZE - sizeof (uint32_t));
2703
2704                 /* Next copy over first word, with mbxOwner set */
2705                 ldata = *((volatile uint32_t *)mb);
2706                 to_slim = phba->MBslimaddr;
2707                 writel(ldata, to_slim);
2708                 readl(to_slim); /* flush */
2709
2710                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2711                         /* switch over to host mailbox */
2712                         psli->sli_flag |= LPFC_SLI2_ACTIVE;
2713                 }
2714         }
2715
2716         wmb();
2717         /* interrupt board to do it right away */
2718         writel(CA_MBATT, phba->CAregaddr);
2719         readl(phba->CAregaddr); /* flush */
2720
2721         switch (flag) {
2722         case MBX_NOWAIT:
2723                 /* Don't wait for it to finish, just return */
2724                 psli->mbox_active = pmbox;
2725                 break;
2726
2727         case MBX_POLL:
2728                 psli->mbox_active = NULL;
2729                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2730                         /* First read mbox status word */
2731                         word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
2732                         word0 = le32_to_cpu(word0);
2733                 } else {
2734                         /* First read mbox status word */
2735                         word0 = readl(phba->MBslimaddr);
2736                 }
2737
2738                 /* Read the HBA Host Attention Register */
2739                 ha_copy = readl(phba->HAregaddr);
2740
2741                 i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2742                 i *= 1000; /* Convert to ms */
2743
2744                 /* Wait for command to complete */
2745                 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2746                        (!(ha_copy & HA_MBATT) &&
2747                         (phba->link_state > LPFC_WARM_START))) {
2748                         if (i-- <= 0) {
2749                                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2750                                 spin_unlock_irqrestore(&phba->hbalock,
2751                                                        drvr_flag);
2752                                 return MBX_NOT_FINISHED;
2753                         }
2754
2755                         /* Check if we took a mbox interrupt while we were
2756                            polling */
2757                         if (((word0 & OWN_CHIP) != OWN_CHIP)
2758                             && (evtctr != psli->slistat.mbox_event))
2759                                 break;
2760
2761                         spin_unlock_irqrestore(&phba->hbalock,
2762                                                drvr_flag);
2763
2764                         msleep(1);
2765
2766                         spin_lock_irqsave(&phba->hbalock, drvr_flag);
2767
2768                         if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2769                                 /* First copy command data */
2770                                 word0 = *((volatile uint32_t *)
2771                                                 &phba->slim2p->mbx);
2772                                 word0 = le32_to_cpu(word0);
2773                                 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2774                                         MAILBOX_t *slimmb;
2775                                         volatile uint32_t slimword0;
2776                                         /* Check real SLIM for any errors */
2777                                         slimword0 = readl(phba->MBslimaddr);
2778                                         slimmb = (MAILBOX_t *) &slimword0;
2779                                         if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2780                                             && slimmb->mbxStatus) {
2781                                                 psli->sli_flag &=
2782                                                     ~LPFC_SLI2_ACTIVE;
2783                                                 word0 = slimword0;
2784                                         }
2785                                 }
2786                         } else {
2787                                 /* First copy command data */
2788                                 word0 = readl(phba->MBslimaddr);
2789                         }
2790                         /* Read the HBA Host Attention Register */
2791                         ha_copy = readl(phba->HAregaddr);
2792                 }
2793
2794                 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2795                         /* copy results back to user */
2796                         lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2797                                               MAILBOX_CMD_SIZE);
2798                 } else {
2799                         /* Copy results back from HBA SLIM */
2800                         lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2801                                                         MAILBOX_CMD_SIZE);
2802                         if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2803                                 pmbox->context2) {
2804                                 lpfc_memcpy_from_slim((void *)pmbox->context2,
2805                                       phba->MBslimaddr + DMP_RSP_OFFSET,
2806                                                       mb->un.varDmp.word_cnt);
2807                         }
2808                 }
2809
2810                 writel(HA_MBATT, phba->HAregaddr);
2811                 readl(phba->HAregaddr); /* flush */
2812
2813                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2814                 status = mb->mbxStatus;
2815         }
2816
2817         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2818         return status;
2819 }
2820
2821 /*
2822  * Caller needs to hold the hbalock.
2823  */
2824 static void
2825 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2826                     struct lpfc_iocbq *piocb)
2827 {
2828         /* Insert the caller's iocb in the txq tail for later processing. */
2829         list_add_tail(&piocb->list, &pring->txq);
2830         pring->txq_cnt++;
2831 }
2832
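/*
 * Pick the next iocb to submit to the HBA: prefer an iocb already queued
 * on the ring's txq; only if the txq is empty is the caller's iocb used,
 * in which case the caller's pointer is cleared so it is not requeued.
 */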
2833 static struct lpfc_iocbq *
2834 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2835                    struct lpfc_iocbq **piocb)
2836 {
2837         struct lpfc_iocbq * nextiocb;
2838
2839         nextiocb = lpfc_sli_ringtx_get(phba, pring);
2840         if (!nextiocb) {
2841                 nextiocb = *piocb;
2842                 *piocb = NULL;
2843         }
2844
2845         return nextiocb;
2846 }
2847
2848 /*
2849  * Lockless version of lpfc_sli_issue_iocb.
2850  */
2851 int
2852 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2853                     struct lpfc_iocbq *piocb, uint32_t flag)
2854 {
2855         struct lpfc_iocbq *nextiocb;
2856         IOCB_t *iocb;
2857
2858         if (piocb->iocb_cmpl && (!piocb->vport) &&
2859            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2860            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2861                 lpfc_printf_log(phba, KERN_ERR,
2862                                 LOG_SLI | LOG_VPORT,
2863                                 "1807 IOCB x%x failed. No vport\n",
2864                                 piocb->iocb.ulpCommand);
2865                 dump_stack();
2866                 return IOCB_ERROR;
2867         }
2868
2869
2870         /* If the PCI channel is in offline state, do not post iocbs. */
2871         if (unlikely(pci_channel_offline(phba->pcidev)))
2872                 return IOCB_ERROR;
2873
2874         /*
2875          * We should never get an IOCB if we are in a < LINK_DOWN state
2876          */
2877         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
2878                 return IOCB_ERROR;
2879
2880         /*
2881          * Check to see if we are blocking IOCB processing because of an
2882          * outstanding mbox command.
2883          */
2884         if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2885                 goto iocb_busy;
2886
2887         if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
2888                 /*
2889                  * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2890                  * can be issued if the link is not up.
2891                  */
2892                 switch (piocb->iocb.ulpCommand) {
2893                 case CMD_QUE_RING_BUF_CN:
2894                 case CMD_QUE_RING_BUF64_CN:
2895                         /*
2896                          * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2897                          * completion, iocb_cmpl MUST be 0.
2898                          */
2899                         if (piocb->iocb_cmpl)
2900                                 piocb->iocb_cmpl = NULL;
2901                         /*FALLTHROUGH*/
2902                 case CMD_CREATE_XRI_CR:
2903                 case CMD_CLOSE_XRI_CN:
2904                 case CMD_CLOSE_XRI_CX:
2905                         break;
2906                 default:
2907                         goto iocb_busy;
2908                 }
2909
2910         /*
2911          * For FCP commands, we must be in a state where we can process link
2912          * attention events.
2913          */
2914         } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2915                             !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2916                 goto iocb_busy;
2917         }
2918
2919         while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2920                (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2921                 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2922
2923         if (iocb)
2924                 lpfc_sli_update_ring(phba, pring);
2925         else
2926                 lpfc_sli_update_full_ring(phba, pring);
2927
2928         if (!piocb)
2929                 return IOCB_SUCCESS;
2930
2931         goto out_busy;
2932
2933  iocb_busy:
2934         pring->stats.iocb_cmd_delay++;
2935
2936  out_busy:
2937
2938         if (!(flag & SLI_IOCB_RET_IOCB)) {
2939                 __lpfc_sli_ringtx_put(phba, pring, piocb);
2940                 return IOCB_SUCCESS;
2941         }
2942
2943         return IOCB_BUSY;
2944 }
2945
2946
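/*
 * Locked wrapper around __lpfc_sli_issue_iocb: takes the hbalock, submits
 * the iocb and drops the lock.  See lpfc_sli_abort_iocb below for a
 * typical call that issues an ABTS through this path.
 */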
2947 int
2948 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2949                     struct lpfc_iocbq *piocb, uint32_t flag)
2950 {
2951         unsigned long iflags;
2952         int rc;
2953
2954         spin_lock_irqsave(&phba->hbalock, iflags);
2955         rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2956         spin_unlock_irqrestore(&phba->hbalock, iflags);
2957
2958         return rc;
2959 }
2960
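/*
 * When multi-ring support is configured, rebalance the SLI-2 iocb
 * entries: move the R1/R3 "extra" cmd and rsp entries from the FCP ring
 * to the extra ring and give that ring a single rctl/type mask taken
 * from the cfg_multi_ring_rctl/cfg_multi_ring_type parameters.
 */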
2961 static int
2962 lpfc_extra_ring_setup( struct lpfc_hba *phba)
2963 {
2964         struct lpfc_sli *psli;
2965         struct lpfc_sli_ring *pring;
2966
2967         psli = &phba->sli;
2968
2969         /* Adjust cmd/rsp ring iocb entries more evenly */
2970
2971         /* Take some away from the FCP ring */
2972         pring = &psli->ring[psli->fcp_ring];
2973         pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2974         pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2975         pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2976         pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2977
2978         /* and give them to the extra ring */
2979         pring = &psli->ring[psli->extra_ring];
2980
2981         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2982         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2983         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2984         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2985
2986         /* Setup default profile for this ring */
2987         pring->iotag_max = 4096;
2988         pring->num_mask = 1;
2989         pring->prt[0].profile = 0;      /* Mask 0 */
2990         pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2991         pring->prt[0].type = phba->cfg_multi_ring_type;
2992         pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2993         return 0;
2994 }
2995
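/*
 * Set up the default layout of the SLI rings: ring 0 (FCP), ring 1
 * (extra) and ring 2 (ELS/CT with its unsolicited event masks).  The
 * per-ring iocb counts and sizes chosen here are later passed to the
 * HBA by CONFIG_PORT, so their total must fit in the SLI-2 SLIM.
 */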
2996 int
2997 lpfc_sli_setup(struct lpfc_hba *phba)
2998 {
2999         int i, totiocbsize = 0;
3000         struct lpfc_sli *psli = &phba->sli;
3001         struct lpfc_sli_ring *pring;
3002
3003         psli->num_rings = MAX_CONFIGURED_RINGS;
3004         psli->sli_flag = 0;
3005         psli->fcp_ring = LPFC_FCP_RING;
3006         psli->next_ring = LPFC_FCP_NEXT_RING;
3007         psli->extra_ring = LPFC_EXTRA_RING;
3008
3009         psli->iocbq_lookup = NULL;
3010         psli->iocbq_lookup_len = 0;
3011         psli->last_iotag = 0;
3012
3013         for (i = 0; i < psli->num_rings; i++) {
3014                 pring = &psli->ring[i];
3015                 switch (i) {
3016                 case LPFC_FCP_RING:     /* ring 0 - FCP */
3017                         /* numCiocb and numRiocb are used in config_port */
3018                         pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
3019                         pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
3020                         pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
3021                         pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
3022                         pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
3023                         pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3024                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3025                                                         SLI3_IOCB_CMD_SIZE :
3026                                                         SLI2_IOCB_CMD_SIZE;
3027                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3028                                                         SLI3_IOCB_RSP_SIZE :
3029                                                         SLI2_IOCB_RSP_SIZE;
3030                         pring->iotag_ctr = 0;
3031                         pring->iotag_max =
3032                             (phba->cfg_hba_queue_depth * 2);
3033                         pring->fast_iotag = pring->iotag_max;
3034                         pring->num_mask = 0;
3035                         break;
3036                 case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
3037                         /* numCiocb and numRiocb are used in config_port */
3038                         pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
3039                         pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
3040                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3041                                                         SLI3_IOCB_CMD_SIZE :
3042                                                         SLI2_IOCB_CMD_SIZE;
3043                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3044                                                         SLI3_IOCB_RSP_SIZE :
3045                                                         SLI2_IOCB_RSP_SIZE;
3046                         pring->iotag_max = phba->cfg_hba_queue_depth;
3047                         pring->num_mask = 0;
3048                         break;
3049                 case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
3050                         /* numCiocb and numRiocb are used in config_port */
3051                         pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
3052                         pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
3053                         pring->sizeCiocb = (phba->sli_rev == 3) ?
3054                                                         SLI3_IOCB_CMD_SIZE :
3055                                                         SLI2_IOCB_CMD_SIZE;
3056                         pring->sizeRiocb = (phba->sli_rev == 3) ?
3057                                                         SLI3_IOCB_RSP_SIZE :
3058                                                         SLI2_IOCB_RSP_SIZE;
3059                         pring->fast_iotag = 0;
3060                         pring->iotag_ctr = 0;
3061                         pring->iotag_max = 4096;
3062                         pring->num_mask = 4;
3063                         pring->prt[0].profile = 0;      /* Mask 0 */
3064                         pring->prt[0].rctl = FC_ELS_REQ;
3065                         pring->prt[0].type = FC_ELS_DATA;
3066                         pring->prt[0].lpfc_sli_rcv_unsol_event =
3067                             lpfc_els_unsol_event;
3068                         pring->prt[1].profile = 0;      /* Mask 1 */
3069                         pring->prt[1].rctl = FC_ELS_RSP;
3070                         pring->prt[1].type = FC_ELS_DATA;
3071                         pring->prt[1].lpfc_sli_rcv_unsol_event =
3072                             lpfc_els_unsol_event;
3073                         pring->prt[2].profile = 0;      /* Mask 2 */
3074                         /* NameServer Inquiry */
3075                         pring->prt[2].rctl = FC_UNSOL_CTL;
3076                         /* NameServer */
3077                         pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3078                         pring->prt[2].lpfc_sli_rcv_unsol_event =
3079                             lpfc_ct_unsol_event;
3080                         pring->prt[3].profile = 0;      /* Mask 3 */
3081                         /* NameServer response */
3082                         pring->prt[3].rctl = FC_SOL_CTL;
3083                         /* NameServer */
3084                         pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3085                         pring->prt[3].lpfc_sli_rcv_unsol_event =
3086                             lpfc_ct_unsol_event;
3087                         break;
3088                 }
3089                 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3090                                 (pring->numRiocb * pring->sizeRiocb);
3091         }
3092         if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
3093                 /* Too many cmd / rsp ring entries in SLI2 SLIM */
3094                 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
3095                        "SLI2 SLIM Data: x%x x%lx\n",
3096                        phba->brd_no, totiocbsize,
3097                        (unsigned long) MAX_SLIM_IOCB_SIZE);
3098         }
3099         if (phba->cfg_multi_ring_support == 2)
3100                 lpfc_extra_ring_setup(phba);
3101
3102         return 0;
3103 }
3104
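/*
 * Initialize the driver-side queues: the mailbox queues plus, for every
 * ring, the txq, txcmplq, iocb_continueq and postbufq list heads and the
 * ring index counters.
 */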
3105 int
3106 lpfc_sli_queue_setup(struct lpfc_hba *phba)
3107 {
3108         struct lpfc_sli *psli;
3109         struct lpfc_sli_ring *pring;
3110         int i;
3111
3112         psli = &phba->sli;
3113         spin_lock_irq(&phba->hbalock);
3114         INIT_LIST_HEAD(&psli->mboxq);
3115         INIT_LIST_HEAD(&psli->mboxq_cmpl);
3116         /* Initialize list headers for txq and txcmplq as doubly linked lists */
3117         for (i = 0; i < psli->num_rings; i++) {
3118                 pring = &psli->ring[i];
3119                 pring->ringno = i;
3120                 pring->next_cmdidx  = 0;
3121                 pring->local_getidx = 0;
3122                 pring->cmdidx = 0;
3123                 INIT_LIST_HEAD(&pring->txq);
3124                 INIT_LIST_HEAD(&pring->txcmplq);
3125                 INIT_LIST_HEAD(&pring->iocb_continueq);
3126                 INIT_LIST_HEAD(&pring->postbufq);
3127         }
3128         spin_unlock_irq(&phba->hbalock);
3129         return 1;
3130 }
3131
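/*
 * Take the SLI layer down for a single vport: fail every txq iocb owned
 * by the vport with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN and issue an
 * abort for each of its iocbs still outstanding on the txcmplq.
 */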
3132 int
3133 lpfc_sli_host_down(struct lpfc_vport *vport)
3134 {
3135         LIST_HEAD(completions);
3136         struct lpfc_hba *phba = vport->phba;
3137         struct lpfc_sli *psli = &phba->sli;
3138         struct lpfc_sli_ring *pring;
3139         struct lpfc_iocbq *iocb, *next_iocb;
3140         int i;
3141         unsigned long flags = 0;
3142         uint16_t prev_pring_flag;
3143
3144         lpfc_cleanup_discovery_resources(vport);
3145
3146         spin_lock_irqsave(&phba->hbalock, flags);
3147         for (i = 0; i < psli->num_rings; i++) {
3148                 pring = &psli->ring[i];
3149                 prev_pring_flag = pring->flag;
3150                 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3151                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
3152                 /*
3153                  * Error everything on the txq since these iocbs have not been
3154                  * given to the FW yet.
3155                  */
3156                 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3157                         if (iocb->vport != vport)
3158                                 continue;
3159                         list_move_tail(&iocb->list, &completions);
3160                         pring->txq_cnt--;
3161                 }
3162
3163                 /* Next issue ABTS for everything on the txcmplq */
3164                 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3165                                                                         list) {
3166                         if (iocb->vport != vport)
3167                                 continue;
3168                         lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3169                 }
3170
3171                 pring->flag = prev_pring_flag;
3172         }
3173
3174         spin_unlock_irqrestore(&phba->hbalock, flags);
3175
3176         while (!list_empty(&completions)) {
3177                 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3178
3179                 if (!iocb->iocb_cmpl)
3180                         lpfc_sli_release_iocbq(phba, iocb);
3181                 else {
3182                         iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3183                         iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
3184                         (iocb->iocb_cmpl) (phba, iocb, iocb);
3185                 }
3186         }
3187         return 1;
3188 }
3189
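/*
 * Take the SLI layer down for the whole HBA: flush every ring's txq,
 * stop the mailbox timer, and complete the active, pending and completed
 * mailbox commands with MBX_NOT_FINISHED.
 */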
3190 int
3191 lpfc_sli_hba_down(struct lpfc_hba *phba)
3192 {
3193         LIST_HEAD(completions);
3194         struct lpfc_sli *psli = &phba->sli;
3195         struct lpfc_sli_ring *pring;
3196         LPFC_MBOXQ_t *pmb;
3197         struct lpfc_iocbq *iocb;
3198         IOCB_t *cmd = NULL;
3199         int i;
3200         unsigned long flags = 0;
3201
3202         lpfc_hba_down_prep(phba);
3203
3204         lpfc_fabric_abort_hba(phba);
3205
3206         spin_lock_irqsave(&phba->hbalock, flags);
3207         for (i = 0; i < psli->num_rings; i++) {
3208                 pring = &psli->ring[i];
3209                 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3210                         pring->flag |= LPFC_DEFERRED_RING_EVENT;
3211
3212                 /*
3213                  * Error everything on the txq since these iocbs have not been
3214                  * given to the FW yet.
3215                  */
3216                 list_splice_init(&pring->txq, &completions);
3217                 pring->txq_cnt = 0;
3218
3219         }
3220         spin_unlock_irqrestore(&phba->hbalock, flags);
3221
3222         while (!list_empty(&completions)) {
3223                 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3224                 cmd = &iocb->iocb;
3225
3226                 if (!iocb->iocb_cmpl)
3227                         lpfc_sli_release_iocbq(phba, iocb);
3228                 else {
3229                         cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3230                         cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3231                         (iocb->iocb_cmpl) (phba, iocb, iocb);
3232                 }
3233         }
3234
3235         /* Return any active mbox cmds */
3236         del_timer_sync(&psli->mbox_tmo);
3237         spin_lock_irqsave(&phba->hbalock, flags);
3238
3239         spin_lock(&phba->pport->work_port_lock);
3240         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3241         spin_unlock(&phba->pport->work_port_lock);
3242
3243         if (psli->mbox_active) {
3244                 list_add_tail(&psli->mbox_active->list, &completions);
3245                 psli->mbox_active = NULL;
3246                 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3247         }
3248
3249         /* Return any pending or completed mbox cmds */
3250         list_splice_init(&phba->sli.mboxq, &completions);
3251         list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3252         INIT_LIST_HEAD(&psli->mboxq);
3253         INIT_LIST_HEAD(&psli->mboxq_cmpl);
3254
3255         spin_unlock_irqrestore(&phba->hbalock, flags);
3256
3257         while (!list_empty(&completions)) {
3258                 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
3259                 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3260                 if (pmb->mbox_cmpl) {
3261                         pmb->mbox_cmpl(phba, pmb);
3262                 }
3263         }
3264         return 1;
3265 }
3266
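/*
 * Copy cnt bytes, a 32-bit word at a time, between an SLI-2 SLIM image
 * and a host buffer, converting each word between little-endian and CPU
 * byte order.  The interrupt handler, for example, copies just the
 * mailbox status word with a cnt of sizeof(uint32_t).
 */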
3267 void
3268 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3269 {
3270         uint32_t *src = srcp;
3271         uint32_t *dest = destp;
3272         uint32_t ldata;
3273         int i;
3274
3275         for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
3276                 ldata = *src;
3277                 ldata = le32_to_cpu(ldata);
3278                 *dest = ldata;
3279                 src++;
3280                 dest++;
3281         }
3282 }
3283
3284 int
3285 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3286                          struct lpfc_dmabuf *mp)
3287 {
3288         /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
3289            later */
3290         spin_lock_irq(&phba->hbalock);
3291         list_add_tail(&mp->list, &pring->postbufq);
3292         pring->postbufq_cnt++;
3293         spin_unlock_irq(&phba->hbalock);
3294         return 0;
3295 }
3296
3297
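/*
 * Find and remove the posted buffer whose DMA address matches phys.
 * Returns the lpfc_dmabuf on success, or NULL (after logging an error)
 * if no buffer on this ring's postbufq maps to that address.
 */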
3298 struct lpfc_dmabuf *
3299 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3300                          dma_addr_t phys)
3301 {
3302         struct lpfc_dmabuf *mp, *next_mp;
3303         struct list_head *slp = &pring->postbufq;
3304
3305         /* Search postbufq, from the beginning, looking for a match on phys */
3306         spin_lock_irq(&phba->hbalock);
3307         list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3308                 if (mp->phys == phys) {
3309                         list_del_init(&mp->list);
3310                         pring->postbufq_cnt--;
3311                         spin_unlock_irq(&phba->hbalock);
3312                         return mp;
3313                 }
3314         }
3315
3316         spin_unlock_irq(&phba->hbalock);
3317         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3318                         "0410 Cannot find virtual addr for mapped buf on "
3319                         "ring %d Data x%llx x%p x%p x%x\n",
3320                         pring->ringno, (unsigned long long)phys,
3321                         slp->next, slp->prev, pring->postbufq_cnt);
3322         return NULL;
3323 }
3324
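/*
 * Completion handler for the abort iocb issued on the ELS ring by
 * lpfc_sli_issue_abort_iotag.  If the abort completed with an error,
 * look up the original iocb by its iotag and, if it is still ours,
 * complete it with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 */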
3325 static void
3326 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3327                         struct lpfc_iocbq *rspiocb)
3328 {
3329         IOCB_t *irsp = &rspiocb->iocb;
3330         uint16_t abort_iotag, abort_context;
3331         struct lpfc_iocbq *abort_iocb;
3332         struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3333
3334         abort_iocb = NULL;
3335
3336         if (irsp->ulpStatus) {
3337                 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
3338                 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
3339
3340                 spin_lock_irq(&phba->hbalock);
3341                 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3342                         abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3343
3344                 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
3345                                 "0327 Cannot abort els iocb %p "
3346                                 "with tag %x context %x, abort status %x, "
3347                                 "abort code %x\n",
3348                                 abort_iocb, abort_iotag, abort_context,
3349                                 irsp->ulpStatus, irsp->un.ulpWord[4]);
3350
3351                 /*
3352                  * make sure we have the right iocbq before taking it
3353                  * off the txcmplq and calling its completion routine.
3354                  */
3355                 if (!abort_iocb ||
3356                     abort_iocb->iocb.ulpContext != abort_context ||
3357                     (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3358                         spin_unlock_irq(&phba->hbalock);
3359                 else {
3360                         list_del_init(&abort_iocb->list);
3361                         pring->txcmplq_cnt--;
3362                         spin_unlock_irq(&phba->hbalock);
3363
3364                         abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3365                         abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3366                         abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3367                         (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
3368                 }
3369         }
3370
3371         lpfc_sli_release_iocbq(phba, cmdiocb);
3372         return;
3373 }
3374
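/*
 * Completion handler used when an iocb should be quietly discarded
 * (e.g. while unloading): log it and free the CT or ELS iocb without any
 * further processing.
 */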
3375 static void
3376 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3377                      struct lpfc_iocbq *rspiocb)
3378 {
3379         IOCB_t *irsp = &rspiocb->iocb;
3380
3381         /* ELS cmd tag <ulpIoTag> completes */
3382         lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3383                         "0133 Ignoring ELS cmd tag x%x completion Data: "
3384                         "x%x x%x x%x\n",
3385                         irsp->ulpIoTag, irsp->ulpStatus,
3386                         irsp->un.ulpWord[4], irsp->ulpTimeout);
3387         if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
3388                 lpfc_ct_free_iocb(phba, cmdiocb);
3389         else
3390                 lpfc_els_free_iocb(phba, cmdiocb);
3391         return;
3392 }
3393
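/*
 * Build and issue an abort for the given outstanding iocb: ABORT_XRI_CN
 * when the link is up, otherwise CLOSE_XRI_CN, identified by the
 * original ulpContext and ulpIoTag.  Called with the hbalock held; the
 * caller should check the return value for IOCB_ERROR (see the note at
 * abort_iotag_exit).
 */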
3394 int
3395 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3396                            struct lpfc_iocbq *cmdiocb)
3397 {
3398         struct lpfc_vport *vport = cmdiocb->vport;
3399         struct lpfc_iocbq *abtsiocbp;
3400         IOCB_t *icmd = NULL;
3401         IOCB_t *iabt = NULL;
3402         int retval = IOCB_ERROR;
3403
3404         /*
3405          * There are certain command types we don't want to abort.  And we
3406          * don't want to abort commands that are already in the process of
3407          * being aborted.
3408          */
3409         icmd = &cmdiocb->iocb;
3410         if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
3411             icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3412             (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
3413                 return 0;
3414
3415         /* If we're unloading, don't abort iocb on the ELS ring, but change the
3416          * callback so that nothing happens when it finishes.
3417          */
3418         if ((vport->load_flag & FC_UNLOADING) &&
3419             (pring->ringno == LPFC_ELS_RING)) {
3420                 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3421                         cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3422                 else
3423                         cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
3424                 goto abort_iotag_exit;
3425         }
3426
3427         /* issue ABTS for this IOCB based on iotag */
3428         abtsiocbp = __lpfc_sli_get_iocbq(phba);
3429         if (abtsiocbp == NULL)
3430                 return 0;
3431
3432         /* Setting this flag signals the completion path to set the
3433          * correct aborted status before calling the completion handler.
3434          */
3435         cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
3436
3437         iabt = &abtsiocbp->iocb;
3438         iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
3439         iabt->un.acxri.abortContextTag = icmd->ulpContext;
3440         iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
3441         iabt->ulpLe = 1;
3442         iabt->ulpClass = icmd->ulpClass;
3443
3444         if (phba->link_state >= LPFC_LINK_UP)
3445                 iabt->ulpCommand = CMD_ABORT_XRI_CN;
3446         else
3447                 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
3448
3449         abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
3450
3451         lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3452                          "0339 Abort xri x%x, original iotag x%x, "
3453                          "abort cmd iotag x%x\n",
3454                          iabt->un.acxri.abortContextTag,
3455                          iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3456         retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3457
3458 abort_iotag_exit:
3459         /*
3460          * Caller to this routine should check for IOCB_ERROR
3461          * and handle it properly.  This routine no longer removes the
3462          * iocb from the txcmplq or calls the completion in case of IOCB_ERROR.
3463          */
3464         return retval;
3465 }
3466
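/*
 * Check whether an outstanding FCP iocb matches the given vport and the
 * LUN / target / host context.  Returns 0 on a match and 1 otherwise, so
 * it can be used directly as a filter by the sum and abort walkers below.
 */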
3467 static int
3468 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3469                            uint16_t tgt_id, uint64_t lun_id,
3470                            lpfc_ctx_cmd ctx_cmd)
3471 {
3472         struct lpfc_scsi_buf *lpfc_cmd;
3473         struct scsi_cmnd *cmnd;
3474         int rc = 1;
3475
3476         if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
3477                 return rc;
3478
3479         if (iocbq->vport != vport)
3480                 return rc;
3481
3482         lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3483         cmnd = lpfc_cmd->pCmd;
3484
3485         if (cmnd == NULL)
3486                 return rc;
3487
3488         switch (ctx_cmd) {
3489         case LPFC_CTX_LUN:
3490                 if ((cmnd->device->id == tgt_id) &&
3491                     (cmnd->device->lun == lun_id))
3492                         rc = 0;
3493                 break;
3494         case LPFC_CTX_TGT:
3495                 if (cmnd->device->id == tgt_id)
3496                         rc = 0;
3497                 break;
3498         case LPFC_CTX_HOST:
3499                 rc = 0;
3500                 break;
3501         default:
3502                 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3503                         __FUNCTION__, ctx_cmd);
3504                 break;
3505         }
3506
3507         return rc;
3508 }
3509
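/*
 * Count the outstanding FCP iocbs that match the given context.  For
 * example, a caller can pass LPFC_CTX_LUN with a target and LUN id to
 * count the commands still pending on one LUN.
 */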
3510 int
3511 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
3512                   lpfc_ctx_cmd ctx_cmd)
3513 {
3514         struct lpfc_hba *phba = vport->phba;
3515         struct lpfc_iocbq *iocbq;
3516         int sum, i;
3517
3518         for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
3519                 iocbq = phba->sli.iocbq_lookup[i];
3520
3521                 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
3522                                                 ctx_cmd) == 0)
3523                         sum++;
3524         }
3525
3526         return sum;
3527 }
3528
3529 void
3530 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3531                         struct lpfc_iocbq *rspiocb)
3532 {
3533         lpfc_sli_release_iocbq(phba, cmdiocb);
3534         return;
3535 }
3536
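/*
 * Issue an ABTS for every outstanding FCP iocb matching the given
 * target/LUN/host context.  Returns the number of iocbs that could not
 * be aborted, either because no iocbq was available or the abort itself
 * failed to issue.
 */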
3537 int
3538 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
3539                     uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
3540 {
3541         struct lpfc_hba *phba = vport->phba;
3542         struct lpfc_iocbq *iocbq;
3543         struct lpfc_iocbq *abtsiocb;
3544         IOCB_t *cmd = NULL;
3545         int errcnt = 0, ret_val = 0;
3546         int i;
3547
3548         for (i = 1; i <= phba->sli.last_iotag; i++) {
3549                 iocbq = phba->sli.iocbq_lookup[i];
3550
3551                 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
3552                                                abort_cmd) != 0)
3553                         continue;
3554
3555                 /* issue ABTS for this IOCB based on iotag */
3556                 abtsiocb = lpfc_sli_get_iocbq(phba);
3557                 if (abtsiocb == NULL) {
3558                         errcnt++;
3559                         continue;
3560                 }
3561
3562                 cmd = &iocbq->iocb;
3563                 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
3564                 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
3565                 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
3566                 abtsiocb->iocb.ulpLe = 1;
3567                 abtsiocb->iocb.ulpClass = cmd->ulpClass;
3568                 abtsiocb->vport = phba->pport;
3569
3570                 if (lpfc_is_link_up(phba))
3571                         abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
3572                 else
3573                         abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3574
3575                 /* Setup callback routine and issue the command. */
3576                 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3577                 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3578                 if (ret_val == IOCB_ERROR) {
3579                         lpfc_sli_release_iocbq(phba, abtsiocb);
3580                         errcnt++;
3581                         continue;
3582                 }
3583         }
3584
3585         return errcnt;
3586 }
3587
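/*
 * Completion handler used by lpfc_sli_issue_iocb_wait: copy the response
 * iocb into the caller's buffer (context2), set LPFC_IO_WAKE and wake up
 * the waiter sleeping on the iocb's wait queue.
 */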
3588 static void
3589 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3590                         struct lpfc_iocbq *cmdiocbq,
3591                         struct lpfc_iocbq *rspiocbq)
3592 {
3593         wait_queue_head_t *pdone_q;
3594         unsigned long iflags;
3595
3596         spin_lock_irqsave(&phba->hbalock, iflags);
3597         cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3598         if (cmdiocbq->context2 && rspiocbq)
3599                 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3600                        &rspiocbq->iocb, sizeof(IOCB_t));
3601
3602         pdone_q = cmdiocbq->context_un.wait_queue;
3603         if (pdone_q)
3604                 wake_up(pdone_q);
3605         spin_unlock_irqrestore(&phba->hbalock, iflags);
3606         return;
3607 }
3608
3609 /*
3610  * Issue the caller's iocb and wait for its completion, but no longer than the
3611  * caller's timeout.  Note that the LPFC_IO_WAKE bit in iocb_flag is cleared
3612  * before the lpfc_sli_issue_iocb call since the wake routine sets it, and by
3613  * definition this is a wait function.
3614  */
3615
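/*
 * Illustrative call pattern only (a sketch, not code taken from this
 * driver): a caller that wants the response copied back allocates a
 * second iocbq for it and releases it when done.
 *
 *      struct lpfc_iocbq *rsp = lpfc_sli_get_iocbq(phba);
 *
 *      if (rsp && lpfc_sli_issue_iocb_wait(phba, pring, piocb, rsp,
 *                                          timeout) == IOCB_SUCCESS)
 *              status = rsp->iocb.ulpStatus;
 *      if (rsp)
 *              lpfc_sli_release_iocbq(phba, rsp);
 */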
3616 int
3617 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3618                          struct lpfc_sli_ring *pring,
3619                          struct lpfc_iocbq *piocb,
3620                          struct lpfc_iocbq *prspiocbq,
3621                          uint32_t timeout)
3622 {
3623         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3624         long timeleft, timeout_req = 0;
3625         int retval = IOCB_SUCCESS;
3626         uint32_t creg_val;
3627
3628         /*
3629          * If the caller has provided a response iocbq buffer, then context2
3630          * must be NULL; otherwise it is an error.
3631          */
3632         if (prspiocbq) {
3633                 if (piocb->context2)
3634                         return IOCB_ERROR;
3635                 piocb->context2 = prspiocbq;
3636         }
3637
3638         piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3639         piocb->context_un.wait_queue = &done_q;
3640         piocb->iocb_flag &= ~LPFC_IO_WAKE;
3641
3642         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3643                 creg_val = readl(phba->HCregaddr);
3644                 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3645                 writel(creg_val, phba->HCregaddr);
3646                 readl(phba->HCregaddr); /* flush */
3647         }
3648
3649         retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3650         if (retval == IOCB_SUCCESS) {
3651                 timeout_req = timeout * HZ;
3652                 timeleft = wait_event_timeout(done_q,
3653                                 piocb->iocb_flag & LPFC_IO_WAKE,
3654                                 timeout_req);
3655
3656                 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3657                         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3658                                         "0331 IOCB wake signaled\n");
3659                 } else if (timeleft == 0) {
3660                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3661                                         "0338 IOCB wait timeout error - no "
3662                                         "wake response Data x%x\n", timeout);
3663                         retval = IOCB_TIMEDOUT;
3664                 } else {
3665                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3666                                         "0330 IOCB wake NOT set, "
3667                                         "Data x%x x%lx\n",
3668                                         timeout, (timeleft / jiffies));
3669                         retval = IOCB_TIMEDOUT;
3670                 }
3671         } else {
3672                 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3673                                 ":0332 IOCB wait issue failed, Data x%x\n",
3674                                 retval);
3675                 retval = IOCB_ERROR;
3676         }
3677
3678         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3679                 creg_val = readl(phba->HCregaddr);
3680                 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3681                 writel(creg_val, phba->HCregaddr);
3682                 readl(phba->HCregaddr); /* flush */
3683         }
3684
3685         if (prspiocbq)
3686                 piocb->context2 = NULL;
3687
3688         piocb->context_un.wait_queue = NULL;
3689         piocb->iocb_cmpl = NULL;
3690         return retval;
3691 }
3692
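/*
 * Issue a mailbox command and sleep until it completes or the timeout
 * (in seconds) expires.  Returns MBX_SUCCESS when the command completed,
 * MBX_TIMEOUT when the wait expired (in which case the late completion
 * is redirected to lpfc_sli_def_mbox_cmpl so it can clean up), or the
 * issue status itself when the command could not be queued.
 */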
3693 int
3694 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3695                          uint32_t timeout)
3696 {
3697         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3698         int retval;
3699         unsigned long flag;
3700
3701         /* The caller must leave context1 empty. */
3702         if (pmboxq->context1 != 0)
3703                 return MBX_NOT_FINISHED;
3704
3705         /* setup wake call as the mailbox completion callback */
3706         pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3707         /* setup context field to pass wait_queue pointer to wake function  */
3708         pmboxq->context1 = &done_q;
3709
3710         /* now issue the command */
3711         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3712
3713         if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3714                 wait_event_interruptible_timeout(done_q,
3715                                 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3716                                 timeout * HZ);
3717
3718                 spin_lock_irqsave(&phba->hbalock, flag);
3719                 pmboxq->context1 = NULL;
3720                 /*
3721                  * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
3722                  * otherwise it timed out, so do not free the resources here.
3723                  */
3724                 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3725                         retval = MBX_SUCCESS;
3726                 else {
3727                         retval = MBX_TIMEOUT;
3728                         pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3729                 }
3730                 spin_unlock_irqrestore(&phba->hbalock, flag);
3731         }
3732
3733         return retval;
3734 }
3735
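/*
 * Poll for up to LPFC_MBOX_TMO seconds for the active mailbox command to
 * finish, handling any mailbox attention seen while waiting so the
 * "Stray Mailbox Interrupt" message is avoided.  Returns 1 if a mailbox
 * command is still active when we give up, otherwise 0.
 */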
3736 int
3737 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3738 {
3739         struct lpfc_vport *vport = phba->pport;
3740         int i = 0;
3741         uint32_t ha_copy;
3742
3743         while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
3744                 if (i++ > LPFC_MBOX_TMO * 1000)
3745                         return 1;
3746
3747                 /*
3748                  * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3749                  * did finish. This way we won't get the misleading
3750                  * "Stray Mailbox Interrupt" message.
3751                  */
3752                 spin_lock_irq(&phba->hbalock);
3753                 ha_copy = phba->work_ha;
3754                 phba->work_ha &= ~HA_MBATT;
3755                 spin_unlock_irq(&phba->hbalock);
3756
3757                 if (ha_copy & HA_MBATT)
3758                         if (lpfc_sli_handle_mb_event(phba) == 0)
3759                                 i = 0;
3760
3761                 msleep(1);
3762         }
3763
3764         return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3765 }
3766
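/*
 * Interrupt handler for the HBA.  Reads and clears the host attention
 * register, handles mailbox completions inline, defers link attention,
 * error attention and slow-ring work to the worker thread, and processes
 * FCP (fast ring) completions directly in interrupt context.
 */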
3767 irqreturn_t
3768 lpfc_intr_handler(int irq, void *dev_id)
3769 {
3770         struct lpfc_hba  *phba;
3771         uint32_t ha_copy;
3772         uint32_t work_ha_copy;
3773         unsigned long status;
3774         int i;
3775         uint32_t control;
3776
3777         MAILBOX_t *mbox, *pmbox;
3778         struct lpfc_vport *vport;
3779         struct lpfc_nodelist *ndlp;
3780         struct lpfc_dmabuf *mp;
3781         LPFC_MBOXQ_t *pmb;
3782         int rc;
3783
3784         /*
3785          * Get the driver's phba structure from the dev_id and
3786          * assume the HBA is not interrupting.
3787          */
3788         phba = (struct lpfc_hba *) dev_id;
3789
3790         if (unlikely(!phba))
3791                 return IRQ_NONE;
3792
3793         /* If the pci channel is offline, ignore all the interrupts. */
3794         if (unlikely(pci_channel_offline(phba->pcidev)))
3795                 return IRQ_NONE;
3796
3797         phba->sli.slistat.sli_intr++;
3798
3799         /*
3800          * Call the HBA to see if it is interrupting.  If not, don't claim
3801          * the interrupt
3802          */
3803
3804         /* Ignore all interrupts during initialization. */
3805         if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3806                 return IRQ_NONE;
3807
3808         /*
3809          * Read host attention register to determine interrupt source
3810          * Clear Attention Sources, except Error Attention (to
3811          * preserve status) and Link Attention
3812          */
3813         spin_lock(&phba->hbalock);
3814         ha_copy = readl(phba->HAregaddr);
3815         /* If somebody is waiting to handle an eratt don't process it
3816          * here.  The brdkill function will do this.
3817          */
3818         if (phba->link_flag & LS_IGNORE_ERATT)
3819                 ha_copy &= ~HA_ERATT;
3820         writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3821         readl(phba->HAregaddr); /* flush */
3822         spin_unlock(&phba->hbalock);
3823
3824         if (unlikely(!ha_copy))
3825                 return IRQ_NONE;
3826
3827         work_ha_copy = ha_copy & phba->work_ha_mask;
3828
3829         if (unlikely(work_ha_copy)) {
3830                 if (work_ha_copy & HA_LATT) {
3831                         if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3832                                 /*
3833                                  * Turn off Link Attention interrupts
3834                                  * until CLEAR_LA done
3835                                  */
3836                                 spin_lock(&phba->hbalock);
3837                                 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3838                                 control = readl(phba->HCregaddr);
3839                                 control &= ~HC_LAINT_ENA;
3840                                 writel(control, phba->HCregaddr);
3841                                 readl(phba->HCregaddr); /* flush */
3842                                 spin_unlock(&phba->hbalock);
3843                         }
3844                         else
3845                                 work_ha_copy &= ~HA_LATT;
3846                 }
3847
3848                 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3849                         /*
3850                          * Turn off Slow Rings interrupts, LPFC_ELS_RING is
3851                          * the only slow ring.
3852                          */
3853                         status = (work_ha_copy &
3854                                 (HA_RXMASK  << (4*LPFC_ELS_RING)));
3855                         status >>= (4*LPFC_ELS_RING);
3856                         if (status & HA_RXMASK) {
3857                                 spin_lock(&phba->hbalock);
3858                                 control = readl(phba->HCregaddr);
3859
3860                                 lpfc_debugfs_slow_ring_trc(phba,
3861                                 "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
3862                                 control, status,
3863                                 (uint32_t)phba->sli.slistat.sli_intr);
3864
3865                                 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
3866                                         lpfc_debugfs_slow_ring_trc(phba,
3867                                                 "ISR Disable ring:"
3868                                                 "pwork:x%x hawork:x%x wait:x%x",
3869                                                 phba->work_ha, work_ha_copy,
3870                                                 (uint32_t)((unsigned long)
3871                                                 phba->work_wait));
3872
3873                                         control &=
3874                                             ~(HC_R0INT_ENA << LPFC_ELS_RING);
3875                                         writel(control, phba->HCregaddr);
3876                                         readl(phba->HCregaddr); /* flush */
3877                                 }
3878                                 else {
3879                                         lpfc_debugfs_slow_ring_trc(phba,
3880                                                 "ISR slow ring:   pwork:"
3881                                                 "x%x hawork:x%x wait:x%x",
3882                                                 phba->work_ha, work_ha_copy,
3883                                                 (uint32_t)((unsigned long)
3884                                                 phba->work_wait));
3885                                 }
3886                                 spin_unlock(&phba->hbalock);
3887                         }
3888                 }
3889
3890                 if (work_ha_copy & HA_ERATT) {
3891                         phba->link_state = LPFC_HBA_ERROR;
3892                         /*
3893                          * There was a link/board error.  Read the
3894                          * status register to retrieve the error event
3895                          * and process it.
3896                          */
3897                         phba->sli.slistat.err_attn_event++;
3898                         /* Save status info */
3899                         phba->work_hs = readl(phba->HSregaddr);
3900                         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3901                         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3902
3903                         /* Clear Chip error bit */
3904                         writel(HA_ERATT, phba->HAregaddr);
3905                         readl(phba->HAregaddr); /* flush */
3906                         phba->pport->stopped = 1;
3907                 }
3908
3909                 if ((work_ha_copy & HA_MBATT) &&
3910                     (phba->sli.mbox_active)) {
3911                         pmb = phba->sli.mbox_active;
3912                         pmbox = &pmb->mb;
3913                         mbox = &phba->slim2p->mbx;
3914                         vport = pmb->vport;
3915
3916                         /* First check out the status word */
3917                         lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3918                         if (pmbox->mbxOwner != OWN_HOST) {
3919                                 /*
3920                                  * Stray Mailbox Interrupt, mbxCommand <cmd>
3921                                  * mbxStatus <status>
3922                                  */
3923                                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3924                                                 LOG_SLI,
3925                                                 "(%d):0304 Stray Mailbox "
3926                                                 "Interrupt mbxCommand x%x "
3927                                                 "mbxStatus x%x\n",
3928                                                 (vport ? vport->vpi : 0),
3929                                                 pmbox->mbxCommand,
3930                                                 pmbox->mbxStatus);
3931                         }
3932                         phba->last_completion_time = jiffies;
3933                         del_timer_sync(&phba->sli.mbox_tmo);
3934
3935                         phba->sli.mbox_active = NULL;
3936                         if (pmb->mbox_cmpl) {
3937                                 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3938                                                       MAILBOX_CMD_SIZE);
3939                         }
3940                         if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3941                                 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3942
3943                                 lpfc_debugfs_disc_trc(vport,
3944                                         LPFC_DISC_TRC_MBOX_VPORT,
3945                                         "MBOX dflt rpi: : status:x%x rpi:x%x",
3946                                         (uint32_t)pmbox->mbxStatus,
3947                                         pmbox->un.varWords[0], 0);
3948
3949                                 if ( !pmbox->mbxStatus) {
3950                                         mp = (struct lpfc_dmabuf *)
3951                                                 (pmb->context1);
3952                                         ndlp = (struct lpfc_nodelist *)
3953                                                 pmb->context2;
3954
3955                                         /* Reg_LOGIN of dflt RPI was successful.
3956                                          * Now let's get rid of the RPI using the
3957                                          * same mbox buffer.
3958                                          */
3959                                         lpfc_unreg_login(phba, vport->vpi,
3960                                                 pmbox->un.varWords[0], pmb);
3961                                         pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3962                                         pmb->context1 = mp;
3963                                         pmb->context2 = ndlp;
3964                                         pmb->vport = vport;
3965                                         spin_lock(&phba->hbalock);
3966                                         phba->sli.sli_flag &=
3967                                                 ~LPFC_SLI_MBOX_ACTIVE;
3968                                         spin_unlock(&phba->hbalock);
3969                                         goto send_current_mbox;
3970                                 }
3971                         }
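                             /* Cancel mailbox timeout work; queue the completion */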
3972                         spin_lock(&phba->pport->work_port_lock);
3973                         phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3974                         spin_unlock(&phba->pport->work_port_lock);
3975                         lpfc_mbox_cmpl_put(phba, pmb);
3976                 }
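                     /* No mailbox active; try to issue the next queued command */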
3977                 if ((work_ha_copy & HA_MBATT) &&
3978                     (phba->sli.mbox_active == NULL)) {
3979 send_next_mbox:
3980                         spin_lock(&phba->hbalock);
3981                         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3982                         pmb = lpfc_mbox_get(phba);
3983                         spin_unlock(&phba->hbalock);
3984 send_current_mbox:
3985                         /* Process next mailbox command if there is one */
3986                         if (pmb != NULL) {
3987                                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3988                                 if (rc == MBX_NOT_FINISHED) {
3989                                         pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3990                                         lpfc_mbox_cmpl_put(phba, pmb);
3991                                         goto send_next_mbox;
3992                                 }
3993                         } else {
3994                                 /* Turn on IOCB processing */
3995                                 for (i = 0; i < phba->sli.num_rings; i++)
3996                                         lpfc_sli_turn_on_ring(phba, i);
3997                         }
3998
3999                 }
4000
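                     /* Defer remaining (slow path) events to the worker thread */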
4001                 spin_lock(&phba->hbalock);
4002                 phba->work_ha |= work_ha_copy;
4003                 if (phba->work_wait)
4004                         lpfc_worker_wake_up(phba);
4005                 spin_unlock(&phba->hbalock);
4006         }
4007
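             /* Mask off worker-handled events; fast-path ring events remain */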
4008         ha_copy &= ~(phba->work_ha_mask);
4009
4010         /*
4011          * Process all events on the FCP ring.  Take the optimized path
4012          * for FCP IO.  Any other IO takes the slow path and is handled
4013          * by the worker thread.
4014          */
4015         status = (ha_copy & (HA_RXMASK  << (4*LPFC_FCP_RING)));
4016         status >>= (4*LPFC_FCP_RING);
4017         if (status & HA_RXMASK)
4018                 lpfc_sli_handle_fast_ring_event(phba,
4019                                                 &phba->sli.ring[LPFC_FCP_RING],
4020                                                 status);
4021
4022         if (phba->cfg_multi_ring_support == 2) {
4023                 /*
4024                  * Process all events on the extra ring.  Take the optimized
4025                  * path for extra ring IO.  Any other IO takes the slow path
4026                  * and is handled by the worker thread.
4027                  */
4028                 status = (ha_copy & (HA_RXMASK  << (4*LPFC_EXTRA_RING)));
4029                 status >>= (4*LPFC_EXTRA_RING);
4030                 if (status & HA_RXMASK) {
4031                         lpfc_sli_handle_fast_ring_event(phba,
4032                                         &phba->sli.ring[LPFC_EXTRA_RING],
4033                                         status);
4034                 }
4035         }
4036         return IRQ_HANDLED;
4037
4038 } /* lpfc_intr_handler */