[SCSI] qla2xxx: Use proper request/response queues with MQ instantiations.
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19 static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20
21 /**
22  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
23  * @irq: interrupt number
24  * @dev_id: pointer to the associated response queue (struct rsp_que)
25  *
26  * Called by system whenever the host adapter generates an interrupt.
27  *
28  * Returns handled flag.
29  */
30 irqreturn_t
31 qla2100_intr_handler(int irq, void *dev_id)
32 {
33         scsi_qla_host_t *vha;
34         struct qla_hw_data *ha;
35         struct device_reg_2xxx __iomem *reg;
36         int             status;
37         unsigned long   iter;
38         uint16_t        hccr;
39         uint16_t        mb[4];
40         struct rsp_que *rsp;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock(&ha->hardware_lock);
54         vha = qla2x00_get_rsp_host(rsp);
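        /*
         * Drain at most 50 interrupt conditions per call.  The fixed cap
         * bounds how long the handler spins here with hardware_lock held
         * in hard-IRQ context (and presumably guards against a stuck
         * interrupt source).
         */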
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
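                        /*
                         * Mailbox register 0 identifies the interrupt:
                         * values 0x4000-0x7fff are mailbox command
                         * completion status codes, while 0x8000-0xbfff are
                         * asynchronous event codes whose payload is read
                         * from mailbox registers 1-3.
                         */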
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock(&ha->hardware_lock);
106
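        /*
         * If a mailbox command is sleeping on this interrupt, flag the
         * completion so the waiting mailbox path (qla2x00_mailbox_command())
         * can stop waiting.
         */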
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: interrupt number
119  * @dev_id: pointer to the associated response queue (struct rsp_que)
120  *
121  * Called by system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137
138         rsp = (struct rsp_que *) dev_id;
139         if (!rsp) {
140                 printk(KERN_INFO
141                     "%s(): NULL response queue pointer\n", __func__);
142                 return (IRQ_NONE);
143         }
144
145         ha = rsp->hw;
146         reg = &ha->iobase->isp;
147         status = 0;
148
149         spin_lock(&ha->hardware_lock);
150         vha = qla2x00_get_rsp_host(rsp);
151         for (iter = 50; iter--; ) {
152                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153                 if (stat & HSR_RISC_PAUSED) {
154                         if (pci_channel_offline(ha->pdev))
155                                 break;
156
157                         hccr = RD_REG_WORD(&reg->hccr);
158                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
159                                 qla_printk(KERN_INFO, ha, "Parity error -- "
160                                     "HCCR=%x, Dumping firmware!\n", hccr);
161                         else
162                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
163                                     "HCCR=%x, Dumping firmware!\n", hccr);
164
165                         /*
166                          * Issue a "HARD" reset in order for the RISC
167                          * interrupt bit to be cleared.  Schedule a big
168                          * hammer to get out of the RISC PAUSED state.
169                          */
170                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171                         RD_REG_WORD(&reg->hccr);
172
173                         ha->isp_ops->fw_dump(vha, 1);
174                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175                         break;
176                 } else if ((stat & HSR_RISC_INT) == 0)
177                         break;
178
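                /*
                 * The low byte of host_status encodes the interrupt source:
                 * 0x1/0x2/0x10/0x11 are mailbox command completions, 0x12
                 * is an asynchronous event with data in mailboxes 1-3, 0x13
                 * indicates response-queue entries to process, and
                 * 0x15/0x16 are fast-post completions carried in the
                 * status word itself.
                 */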
179                 switch (stat & 0xff) {
180                 case 0x1:
181                 case 0x2:
182                 case 0x10:
183                 case 0x11:
184                         qla2x00_mbx_completion(vha, MSW(stat));
185                         status |= MBX_INTERRUPT;
186
187                         /* Release mailbox registers. */
188                         WRT_REG_WORD(&reg->semaphore, 0);
189                         break;
190                 case 0x12:
191                         mb[0] = MSW(stat);
192                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195                         qla2x00_async_event(vha, rsp, mb);
196                         break;
197                 case 0x13:
198                         qla2x00_process_response_queue(rsp);
199                         break;
200                 case 0x15:
201                         mb[0] = MBA_CMPLT_1_16BIT;
202                         mb[1] = MSW(stat);
203                         qla2x00_async_event(vha, rsp, mb);
204                         break;
205                 case 0x16:
206                         mb[0] = MBA_SCSI_COMPLETION;
207                         mb[1] = MSW(stat);
208                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209                         qla2x00_async_event(vha, rsp, mb);
210                         break;
211                 default:
212                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
213                             "(%d).\n",
214                             vha->host_no, stat & 0xff));
215                         break;
216                 }
217                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218                 RD_REG_WORD_RELAXED(&reg->hccr);
219         }
220         spin_unlock(&ha->hardware_lock);
221
222         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
224                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
225                 complete(&ha->mbx_intr_comp);
226         }
227
228         return (IRQ_HANDLED);
229 }
230
231 /**
232  * qla2x00_mbx_completion() - Process mailbox command completions.
233  * @vha: SCSI driver HA context
234  * @mb0: value of mailbox register 0 (completion status)
235  */
236 static void
237 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
238 {
239         uint16_t        cnt;
240         uint16_t __iomem *wptr;
241         struct qla_hw_data *ha = vha->hw;
242         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
243
244         /* Load return mailbox registers. */
245         ha->flags.mbox_int = 1;
246         ha->mailbox_out[0] = mb0;
247         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
248
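        /*
         * Copy the remaining mailbox registers.  On the ISP2200 the
         * registers above mailbox 7 apparently live at a different offset,
         * hence the pointer is re-seated at mailbox 8; mailboxes 4 and 5
         * are read via qla2x00_debounce_register() to filter transient
         * values.
         */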
249         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
250                 if (IS_QLA2200(ha) && cnt == 8)
251                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
252                 if (cnt == 4 || cnt == 5)
253                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
254                 else
255                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
256
257                 wptr++;
258         }
259
260         if (ha->mcp) {
261                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
262                     __func__, vha->host_no, ha->mcp->mb[0]));
263         } else {
264                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
265                     __func__, vha->host_no));
266         }
267 }
268
269 /**
270  * qla2x00_async_event() - Process asynchronous events.
271  * @vha: SCSI driver HA context
272  * @mb: Mailbox registers (0 - 3)
273  */
274 void
275 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
276 {
277 #define LS_UNKNOWN      2
278         static char     *link_speeds[5] = { "1", "2", "?", "4", "8" };
279         char            *link_speed;
280         uint16_t        handle_cnt;
281         uint16_t        cnt;
282         uint32_t        handles[5];
283         struct qla_hw_data *ha = vha->hw;
284         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
285         uint32_t        rscn_entry, host_pid;
286         uint8_t         rscn_queue_index;
287         unsigned long   flags;
288
289         /* Setup to process RIO completion. */
290         handle_cnt = 0;
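        /*
         * RIO/fast-post mailbox events return completed I/O handles
         * directly in the mailbox registers.  Normalize the 16-bit and
         * 32-bit variants into handles[] and fold them all into the
         * MBA_SCSI_COMPLETION case handled below.
         */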
291         switch (mb[0]) {
292         case MBA_SCSI_COMPLETION:
293                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
294                 handle_cnt = 1;
295                 break;
296         case MBA_CMPLT_1_16BIT:
297                 handles[0] = mb[1];
298                 handle_cnt = 1;
299                 mb[0] = MBA_SCSI_COMPLETION;
300                 break;
301         case MBA_CMPLT_2_16BIT:
302                 handles[0] = mb[1];
303                 handles[1] = mb[2];
304                 handle_cnt = 2;
305                 mb[0] = MBA_SCSI_COMPLETION;
306                 break;
307         case MBA_CMPLT_3_16BIT:
308                 handles[0] = mb[1];
309                 handles[1] = mb[2];
310                 handles[2] = mb[3];
311                 handle_cnt = 3;
312                 mb[0] = MBA_SCSI_COMPLETION;
313                 break;
314         case MBA_CMPLT_4_16BIT:
315                 handles[0] = mb[1];
316                 handles[1] = mb[2];
317                 handles[2] = mb[3];
318                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
319                 handle_cnt = 4;
320                 mb[0] = MBA_SCSI_COMPLETION;
321                 break;
322         case MBA_CMPLT_5_16BIT:
323                 handles[0] = mb[1];
324                 handles[1] = mb[2];
325                 handles[2] = mb[3];
326                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
327                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
328                 handle_cnt = 5;
329                 mb[0] = MBA_SCSI_COMPLETION;
330                 break;
331         case MBA_CMPLT_2_32BIT:
332                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
333                 handles[1] = le32_to_cpu(
334                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
335                     RD_MAILBOX_REG(ha, reg, 6));
336                 handle_cnt = 2;
337                 mb[0] = MBA_SCSI_COMPLETION;
338                 break;
339         default:
340                 break;
341         }
342
343         switch (mb[0]) {
344         case MBA_SCSI_COMPLETION:       /* Fast Post */
345                 if (!vha->flags.online)
346                         break;
347
348                 for (cnt = 0; cnt < handle_cnt; cnt++)
349                         qla2x00_process_completed_request(vha, rsp->req,
350                                 handles[cnt]);
351                 break;
352
353         case MBA_RESET:                 /* Reset */
354                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
355                         vha->host_no));
356
357                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
358                 break;
359
360         case MBA_SYSTEM_ERR:            /* System Error */
361                 qla_printk(KERN_INFO, ha,
362                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
363                     mb[1], mb[2], mb[3]);
364
365                 ha->isp_ops->fw_dump(vha, 1);
366
367                 if (IS_FWI2_CAPABLE(ha)) {
368                         if (mb[1] == 0 && mb[2] == 0) {
369                                 qla_printk(KERN_ERR, ha,
370                                     "Unrecoverable Hardware Error: adapter "
371                                     "marked OFFLINE!\n");
372                                 vha->flags.online = 0;
373                         } else
374                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
375                 } else if (mb[1] == 0) {
376                         qla_printk(KERN_INFO, ha,
377                             "Unrecoverable Hardware Error: adapter marked "
378                             "OFFLINE!\n");
379                         vha->flags.online = 0;
380                 } else
381                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
382                 break;
383
384         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
385                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
386                     vha->host_no));
387                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
388
389                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
390                 break;
391
392         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
393                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
394                     vha->host_no));
395                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
396
397                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
398                 break;
399
400         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
401                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
402                     vha->host_no));
403                 break;
404
405         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
406                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
407                     mb[1]));
408                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
409
410                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
411                         atomic_set(&vha->loop_state, LOOP_DOWN);
412                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
413                         qla2x00_mark_all_devices_lost(vha, 1);
414                 }
415
416                 if (vha->vp_idx) {
417                         atomic_set(&vha->vp_state, VP_FAILED);
418                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
419                 }
420
421                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
422                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
423
424                 vha->flags.management_server_logged_in = 0;
425                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
426                 break;
427
428         case MBA_LOOP_UP:               /* Loop Up Event */
429                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
430                         link_speed = link_speeds[0];
431                         ha->link_data_rate = PORT_SPEED_1GB;
432                 } else {
433                         link_speed = link_speeds[LS_UNKNOWN];
434                         if (mb[1] < 5)
435                                 link_speed = link_speeds[mb[1]];
436                         ha->link_data_rate = mb[1];
437                 }
438
439                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
440                     vha->host_no, link_speed));
441                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
442                     link_speed);
443
444                 vha->flags.management_server_logged_in = 0;
445                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
446                 break;
447
448         case MBA_LOOP_DOWN:             /* Loop Down Event */
449                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
450                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
451                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
452                     mb[1], mb[2], mb[3]);
453
454                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
455                         atomic_set(&vha->loop_state, LOOP_DOWN);
456                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
457                         vha->device_flags |= DFLG_NO_CABLE;
458                         qla2x00_mark_all_devices_lost(vha, 1);
459                 }
460
461                 if (vha->vp_idx) {
462                         atomic_set(&vha->vp_state, VP_FAILED);
463                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
464                 }
465
466                 vha->flags.management_server_logged_in = 0;
467                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
468                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
469                 break;
470
471         case MBA_LIP_RESET:             /* LIP reset occurred */
472                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
473                     vha->host_no, mb[1]));
474                 qla_printk(KERN_INFO, ha,
475                     "LIP reset occurred (%x).\n", mb[1]);
476
477                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
478                         atomic_set(&vha->loop_state, LOOP_DOWN);
479                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
480                         qla2x00_mark_all_devices_lost(vha, 1);
481                 }
482
483                 if (vha->vp_idx) {
484                         atomic_set(&vha->vp_state, VP_FAILED);
485                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
486                 }
487
488                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
489
490                 ha->operating_mode = LOOP;
491                 vha->flags.management_server_logged_in = 0;
492                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
493                 break;
494
495         case MBA_POINT_TO_POINT:        /* Point-to-Point */
496                 if (IS_QLA2100(ha))
497                         break;
498
499                 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
500                     vha->host_no));
501
502                 /*
503                  * Until there's a transition from loop down to loop up, treat
504                  * this as loop down only.
505                  */
506                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
507                         atomic_set(&vha->loop_state, LOOP_DOWN);
508                         if (!atomic_read(&vha->loop_down_timer))
509                                 atomic_set(&vha->loop_down_timer,
510                                     LOOP_DOWN_TIME);
511                         qla2x00_mark_all_devices_lost(vha, 1);
512                 }
513
514                 if (vha->vp_idx) {
515                         atomic_set(&vha->vp_state, VP_FAILED);
516                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
517                 }
518
519                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
520                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
521
522                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
523                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
524
525                 ha->flags.gpsc_supported = 1;
526                 vha->flags.management_server_logged_in = 0;
527                 break;
528
529         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
530                 if (IS_QLA2100(ha))
531                         break;
532
533                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
534                     "received.\n",
535                     vha->host_no));
536                 qla_printk(KERN_INFO, ha,
537                     "Configuration change detected: value=%x.\n", mb[1]);
538
539                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
540                         atomic_set(&vha->loop_state, LOOP_DOWN);
541                         if (!atomic_read(&vha->loop_down_timer))
542                                 atomic_set(&vha->loop_down_timer,
543                                     LOOP_DOWN_TIME);
544                         qla2x00_mark_all_devices_lost(vha, 1);
545                 }
546
547                 if (vha->vp_idx) {
548                         atomic_set(&vha->vp_state, VP_FAILED);
549                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
550                 }
551
552                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
553                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
554                 break;
555
556         case MBA_PORT_UPDATE:           /* Port database update */
557                 /* Only handle SCNs for our Vport index. */
558                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
559                         break;
560
561                 /*
562                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
563                  * event etc. earlier indicating loop is down) then process
564                  * it.  Otherwise ignore it and Wait for RSCN to come in.
565                  */
566                 atomic_set(&vha->loop_down_timer, 0);
567                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
568                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
569                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
570                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
571                             mb[2], mb[3]));
572                         break;
573                 }
574
575                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
576                     vha->host_no));
577                 DEBUG(printk(KERN_INFO
578                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
579                     vha->host_no, mb[1], mb[2], mb[3]));
580
581                 /*
582                  * Mark all devices as missing so we will login again.
583                  */
584                 atomic_set(&vha->loop_state, LOOP_UP);
585
586                 qla2x00_mark_all_devices_lost(vha, 1);
587
588                 vha->flags.rscn_queue_overflow = 1;
589
590                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
591                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
592                 break;
593
594         case MBA_RSCN_UPDATE:           /* State Change Registration */
595                 /* Check if the Vport has issued a SCR */
596                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
597                         break;
598                 /* Only handle SCNs for our Vport index. */
599                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
600                         break;
601                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
602                     vha->host_no));
603                 DEBUG(printk(KERN_INFO
604                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
605                     vha->host_no, mb[1], mb[2], mb[3]));
606
607                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
608                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
609                                 | vha->d_id.b.al_pa;
610                 if (rscn_entry == host_pid) {
611                         DEBUG(printk(KERN_INFO
612                             "scsi(%ld): Ignoring RSCN update to local host "
613                             "port ID (%06x)\n",
614                             vha->host_no, host_pid));
615                         break;
616                 }
617
618                 /* Ignore reserved bits from RSCN-payload. */
619                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
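                /*
                 * Queue the affected address in the software RSCN ring.
                 * rscn_in_ptr wraps at MAX_RSCN_COUNT; if advancing it would
                 * collide with rscn_out_ptr the queue is full, so only the
                 * overflow flag is set (the DPC path is then expected to
                 * fall back to a broader rediscovery).
                 */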
620                 rscn_queue_index = vha->rscn_in_ptr + 1;
621                 if (rscn_queue_index == MAX_RSCN_COUNT)
622                         rscn_queue_index = 0;
623                 if (rscn_queue_index != vha->rscn_out_ptr) {
624                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
625                         vha->rscn_in_ptr = rscn_queue_index;
626                 } else {
627                         vha->flags.rscn_queue_overflow = 1;
628                 }
629
630                 atomic_set(&vha->loop_state, LOOP_UPDATE);
631                 atomic_set(&vha->loop_down_timer, 0);
632                 vha->flags.management_server_logged_in = 0;
633
634                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
635                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
636                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
637                 break;
638
639         /* case MBA_RIO_RESPONSE: */
640         case MBA_ZIO_RESPONSE:
641                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
642                     vha->host_no));
643
644                 if (IS_FWI2_CAPABLE(ha))
645                         qla24xx_process_response_queue(rsp);
646                 else
647                         qla2x00_process_response_queue(rsp);
648                 break;
649
650         case MBA_DISCARD_RND_FRAME:
651                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
652                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
653                 break;
654
655         case MBA_TRACE_NOTIFICATION:
656                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
657                     vha->host_no, mb[1], mb[2]));
658                 break;
659
660         case MBA_ISP84XX_ALERT:
661                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
662                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
663
664                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
665                 switch (mb[1]) {
666                 case A84_PANIC_RECOVERY:
667                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
668                             "%04x %04x\n", mb[2], mb[3]);
669                         break;
670                 case A84_OP_LOGIN_COMPLETE:
671                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
672                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
673                             "firmware version %x\n", ha->cs84xx->op_fw_version));
674                         break;
675                 case A84_DIAG_LOGIN_COMPLETE:
676                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
677                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
678                             "diagnostic firmware version %x\n",
679                             ha->cs84xx->diag_fw_version));
680                         break;
681                 case A84_GOLD_LOGIN_COMPLETE:
682                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
683                         ha->cs84xx->fw_update = 1;
684                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
685                             "firmware version %x\n",
686                             ha->cs84xx->gold_fw_version));
687                         break;
688                 default:
689                         qla_printk(KERN_ERR, ha,
690                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
691                             mb[1], mb[2], mb[3]);
692                 }
693                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
694                 break;
695         }
696
697         if (!vha->vp_idx && ha->num_vhosts)
698                 qla2x00_alert_all_vps(rsp, mb);
699 }
700
701 static void
702 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
703 {
704         fc_port_t *fcport = data;
705         struct scsi_qla_host *vha = fcport->vha;
706         struct qla_hw_data *ha = vha->hw;
707         struct req_que *req = NULL;
708
709         req = ha->req_q_map[vha->req_ques[0]];
710         if (!req)
711                 return;
712         if (req->max_q_depth <= sdev->queue_depth)
713                 return;
714
715         if (sdev->ordered_tags)
716                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
717                     sdev->queue_depth + 1);
718         else
719                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
720                     sdev->queue_depth + 1);
721
722         fcport->last_ramp_up = jiffies;
723
724         DEBUG2(qla_printk(KERN_INFO, ha,
725             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
726             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
727             sdev->queue_depth));
728 }
729
730 static void
731 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
732 {
733         fc_port_t *fcport = data;
734
735         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
736                 return;
737
738         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
739             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
740             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
741             sdev->queue_depth));
742 }
743
744 static inline void
745 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
746                                                                 srb_t *sp)
747 {
748         fc_port_t *fcport;
749         struct scsi_device *sdev;
750
751         sdev = sp->cmd->device;
752         if (sdev->queue_depth >= req->max_q_depth)
753                 return;
754
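        /*
         * Only ramp the queue depth back up once ql2xqfullrampup seconds
         * have elapsed since both the last ramp-up and the last QUEUE FULL
         * reported on this port.
         */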
755         fcport = sp->fcport;
756         if (time_before(jiffies,
757             fcport->last_ramp_up + ql2xqfullrampup * HZ))
758                 return;
759         if (time_before(jiffies,
760             fcport->last_queue_full + ql2xqfullrampup * HZ))
761                 return;
762
763         starget_for_each_device(sdev->sdev_target, fcport,
764             qla2x00_adjust_sdev_qdepth_up);
765 }
766
767 /**
768  * qla2x00_process_completed_request() - Process a Fast Post response.
769  * @vha: SCSI driver HA context
770  * @index: SRB index
771  */
772 static void
773 qla2x00_process_completed_request(struct scsi_qla_host *vha,
774                                 struct req_que *req, uint32_t index)
775 {
776         srb_t *sp;
777         struct qla_hw_data *ha = vha->hw;
778
779         /* Validate handle. */
780         if (index >= MAX_OUTSTANDING_COMMANDS) {
781                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
782                     vha->host_no, index));
783                 qla_printk(KERN_WARNING, ha,
784                     "Invalid SCSI completion handle %d.\n", index);
785
786                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
787                 return;
788         }
789
790         sp = req->outstanding_cmds[index];
791         if (sp) {
792                 /* Free outstanding command slot. */
793                 req->outstanding_cmds[index] = NULL;
794
795                 CMD_COMPL_STATUS(sp->cmd) = 0L;
796                 CMD_SCSI_STATUS(sp->cmd) = 0L;
797
798                 /* Save ISP completion status */
799                 sp->cmd->result = DID_OK << 16;
800
801                 qla2x00_ramp_up_queue_depth(vha, req, sp);
802                 qla2x00_sp_compl(ha, sp);
803         } else {
804                 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
805                     vha->host_no));
806                 qla_printk(KERN_WARNING, ha,
807                     "Invalid ISP SCSI completion handle\n");
808
809                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
810         }
811 }
812
813 /**
814  * qla2x00_process_response_queue() - Process response queue entries.
815  * @rsp: response queue to process
816  */
817 void
818 qla2x00_process_response_queue(struct rsp_que *rsp)
819 {
820         struct scsi_qla_host *vha;
821         struct qla_hw_data *ha = rsp->hw;
822         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
823         sts_entry_t     *pkt;
824         uint16_t        handle_cnt;
825         uint16_t        cnt;
826
827         vha = qla2x00_get_rsp_host(rsp);
828
829         if (!vha->flags.online)
830                 return;
831
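        /*
         * Walk the response ring until an entry that is still stamped
         * RESPONSE_PROCESSED is reached.  Each consumed entry is re-stamped
         * below so it is not handled again after the ring wraps, and the
         * out-pointer register is written once the batch has been drained.
         */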
832         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
833                 pkt = (sts_entry_t *)rsp->ring_ptr;
834
835                 rsp->ring_index++;
836                 if (rsp->ring_index == rsp->length) {
837                         rsp->ring_index = 0;
838                         rsp->ring_ptr = rsp->ring;
839                 } else {
840                         rsp->ring_ptr++;
841                 }
842
843                 if (pkt->entry_status != 0) {
844                         DEBUG3(printk(KERN_INFO
845                             "scsi(%ld): Process error entry.\n", vha->host_no));
846
847                         qla2x00_error_entry(vha, rsp, pkt);
848                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
849                         wmb();
850                         continue;
851                 }
852
853                 switch (pkt->entry_type) {
854                 case STATUS_TYPE:
855                         qla2x00_status_entry(vha, rsp, pkt);
856                         break;
857                 case STATUS_TYPE_21:
858                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
859                         for (cnt = 0; cnt < handle_cnt; cnt++) {
860                                 qla2x00_process_completed_request(vha, rsp->req,
861                                     ((sts21_entry_t *)pkt)->handle[cnt]);
862                         }
863                         break;
864                 case STATUS_TYPE_22:
865                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
866                         for (cnt = 0; cnt < handle_cnt; cnt++) {
867                                 qla2x00_process_completed_request(vha, rsp->req,
868                                     ((sts22_entry_t *)pkt)->handle[cnt]);
869                         }
870                         break;
871                 case STATUS_CONT_TYPE:
872                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
873                         break;
874                 default:
875                         /* Type Not Supported. */
876                         DEBUG4(printk(KERN_WARNING
877                             "scsi(%ld): Received unknown response pkt type %x "
878                             "entry status=%x.\n",
879                             vha->host_no, pkt->entry_type, pkt->entry_status));
880                         break;
881                 }
882                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
883                 wmb();
884         }
885
886         /* Adjust ring index */
887         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
888 }
889
890 static inline void
891 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
892 {
893         struct scsi_cmnd *cp = sp->cmd;
894
895         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
896                 sense_len = SCSI_SENSE_BUFFERSIZE;
897
898         CMD_ACTUAL_SNSLEN(cp) = sense_len;
899         sp->request_sense_length = sense_len;
900         sp->request_sense_ptr = cp->sense_buffer;
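        /*
         * A status IOCB carries at most 32 bytes of sense data inline; any
         * remainder arrives in Status Continuation entries.  The unread
         * portion is tracked via request_sense_ptr/length, and the SRB is
         * parked on vha->status_srb so qla2x00_status_cont_entry() can
         * finish the copy.
         */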
901         if (sp->request_sense_length > 32)
902                 sense_len = 32;
903
904         memcpy(cp->sense_buffer, sense_data, sense_len);
905
906         sp->request_sense_ptr += sense_len;
907         sp->request_sense_length -= sense_len;
908         if (sp->request_sense_length != 0)
909                 sp->fcport->vha->status_srb = sp;
910
911         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
912             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
913             cp->device->channel, cp->device->id, cp->device->lun, cp,
914             cp->serial_number));
915         if (sense_len)
916                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
917                     CMD_ACTUAL_SNSLEN(cp)));
918 }
919
920 /**
921  * qla2x00_status_entry() - Process a Status IOCB entry.
922  * @vha: SCSI driver HA context
923  * @pkt: Entry pointer
924  */
925 static void
926 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
927 {
928         srb_t           *sp;
929         fc_port_t       *fcport;
930         struct scsi_cmnd *cp;
931         sts_entry_t *sts;
932         struct sts_entry_24xx *sts24;
933         uint16_t        comp_status;
934         uint16_t        scsi_status;
935         uint8_t         lscsi_status;
936         int32_t         resid;
937         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
938         uint8_t         *rsp_info, *sense_data;
939         struct qla_hw_data *ha = vha->hw;
940         struct req_que *req = rsp->req;
941
942         sts = (sts_entry_t *) pkt;
943         sts24 = (struct sts_entry_24xx *) pkt;
944         if (IS_FWI2_CAPABLE(ha)) {
945                 comp_status = le16_to_cpu(sts24->comp_status);
946                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
947         } else {
948                 comp_status = le16_to_cpu(sts->comp_status);
949                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
950         }
951
952         /* Fast path completion. */
953         if (comp_status == CS_COMPLETE && scsi_status == 0) {
954                 qla2x00_process_completed_request(vha, req, sts->handle);
955
956                 return;
957         }
958
959         /* Validate handle. */
960         if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
961                 sp = req->outstanding_cmds[sts->handle];
962                 req->outstanding_cmds[sts->handle] = NULL;
963         } else
964                 sp = NULL;
965
966         if (sp == NULL) {
967                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
968                     vha->host_no));
969                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
970
971                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
972                 qla2xxx_wake_dpc(vha);
973                 return;
974         }
975         cp = sp->cmd;
976         if (cp == NULL) {
977                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
978                     "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
979                 qla_printk(KERN_WARNING, ha,
980                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
981
982                 return;
983         }
984
985         lscsi_status = scsi_status & STATUS_MASK;
986         CMD_ENTRY_STATUS(cp) = sts->entry_status;
987         CMD_COMPL_STATUS(cp) = comp_status;
988         CMD_SCSI_STATUS(cp) = scsi_status;
989
990         fcport = sp->fcport;
991
992         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
993         if (IS_FWI2_CAPABLE(ha)) {
994                 sense_len = le32_to_cpu(sts24->sense_len);
995                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
996                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
997                 fw_resid_len = le32_to_cpu(sts24->residual_len);
998                 rsp_info = sts24->data;
999                 sense_data = sts24->data;
1000                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1001         } else {
1002                 sense_len = le16_to_cpu(sts->req_sense_length);
1003                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1004                 resid_len = le32_to_cpu(sts->residual_length);
1005                 rsp_info = sts->rsp_info;
1006                 sense_data = sts->req_sense_data;
1007         }
1008
1009         /* Check for any FCP transport errors. */
1010         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1011                 /* Sense data lies beyond any FCP RESPONSE data. */
1012                 if (IS_FWI2_CAPABLE(ha))
1013                         sense_data += rsp_info_len;
1014                 if (rsp_info_len > 3 && rsp_info[3]) {
1015                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1016                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1017                             "retrying command\n", vha->host_no,
1018                             cp->device->channel, cp->device->id,
1019                             cp->device->lun, rsp_info_len, rsp_info[0],
1020                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1021                             rsp_info[5], rsp_info[6], rsp_info[7]));
1022
1023                         cp->result = DID_BUS_BUSY << 16;
1024                         qla2x00_sp_compl(ha, sp);
1025                         return;
1026                 }
1027         }
1028
1029         /* Check for overrun. */
1030         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1031             scsi_status & SS_RESIDUAL_OVER)
1032                 comp_status = CS_DATA_OVERRUN;
1033
1034         /*
1035          * Based on the host and SCSI status, generate a status code for Linux.
1036          */
1037         switch (comp_status) {
1038         case CS_COMPLETE:
1039         case CS_QUEUE_FULL:
1040                 if (scsi_status == 0) {
1041                         cp->result = DID_OK << 16;
1042                         break;
1043                 }
1044                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1045                         resid = resid_len;
1046                         scsi_set_resid(cp, resid);
1047                         CMD_RESID_LEN(cp) = resid;
1048
1049                         if (!lscsi_status &&
1050                             ((unsigned)(scsi_bufflen(cp) - resid) <
1051                              cp->underflow)) {
1052                                 qla_printk(KERN_INFO, ha,
1053                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1054                                            "detected (%x of %x bytes)...returning "
1055                                            "error status.\n", vha->host_no,
1056                                            cp->device->channel, cp->device->id,
1057                                            cp->device->lun, resid,
1058                                            scsi_bufflen(cp));
1059
1060                                 cp->result = DID_ERROR << 16;
1061                                 break;
1062                         }
1063                 }
1064                 cp->result = DID_OK << 16 | lscsi_status;
1065
1066                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1067                         DEBUG2(printk(KERN_INFO
1068                             "scsi(%ld): QUEUE FULL status detected "
1069                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1070                             scsi_status));
1071
1072                         /* Adjust queue depth for all luns on the port. */
1073                         fcport->last_queue_full = jiffies;
1074                         starget_for_each_device(cp->device->sdev_target,
1075                             fcport, qla2x00_adjust_sdev_qdepth_down);
1076                         break;
1077                 }
1078                 if (lscsi_status != SS_CHECK_CONDITION)
1079                         break;
1080
1081                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1082                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1083                         break;
1084
1085                 qla2x00_handle_sense(sp, sense_data, sense_len);
1086                 break;
1087
1088         case CS_DATA_UNDERRUN:
1089                 resid = resid_len;
1090                 /* Use F/W calculated residual length. */
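                /*
                 * On FWI-2 capable ISPs the firmware-computed residual is
                 * authoritative; if the target never flagged the underrun,
                 * or its reported residual disagrees, the SCSI status is
                 * discarded and the underrun is handled as a dropped-frame
                 * case below.
                 */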
1091                 if (IS_FWI2_CAPABLE(ha)) {
1092                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1093                                 lscsi_status = 0;
1094                         } else if (resid != fw_resid_len) {
1095                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1096                                 lscsi_status = 0;
1097                         }
1098                         resid = fw_resid_len;
1099                 }
1100
1101                 if (scsi_status & SS_RESIDUAL_UNDER) {
1102                         scsi_set_resid(cp, resid);
1103                         CMD_RESID_LEN(cp) = resid;
1104                 } else {
1105                         DEBUG2(printk(KERN_INFO
1106                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1107                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1108                             "os_underflow=0x%x\n", vha->host_no,
1109                             cp->device->id, cp->device->lun, comp_status,
1110                             scsi_status, resid_len, resid, cp->cmnd[0],
1111                             cp->underflow));
1112
1113                 }
1114
1115                 /*
1116                  * Check to see if SCSI Status is non zero. If so report SCSI
1117                  * Status.
1118                  */
1119                 if (lscsi_status != 0) {
1120                         cp->result = DID_OK << 16 | lscsi_status;
1121
1122                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1123                                 DEBUG2(printk(KERN_INFO
1124                                     "scsi(%ld): QUEUE FULL status detected "
1125                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1126                                     scsi_status));
1127
1128                                 /*
1129                                  * Adjust queue depth for all luns on the
1130                                  * port.
1131                                  */
1132                                 fcport->last_queue_full = jiffies;
1133                                 starget_for_each_device(
1134                                     cp->device->sdev_target, fcport,
1135                                     qla2x00_adjust_sdev_qdepth_down);
1136                                 break;
1137                         }
1138                         if (lscsi_status != SS_CHECK_CONDITION)
1139                                 break;
1140
1141                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1142                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1143                                 break;
1144
1145                         qla2x00_handle_sense(sp, sense_data, sense_len);
1146                 } else {
1147                         /*
1148                          * If RISC reports underrun and target does not report
1149                          * it then we must have a lost frame, so tell upper
1150                          * layer to retry it by reporting a bus busy.
1151                          */
1152                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1153                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1154                                               "frame(s) detected (%x of %x bytes)..."
1155                                               "retrying command.\n",
1156                                         vha->host_no, cp->device->channel,
1157                                         cp->device->id, cp->device->lun, resid,
1158                                         scsi_bufflen(cp)));
1159
1160                                 cp->result = DID_BUS_BUSY << 16;
1161                                 break;
1162                         }
1163
1164                         /* Handle mid-layer underflow */
1165                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1166                             cp->underflow) {
1167                                 qla_printk(KERN_INFO, ha,
1168                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1169                                            "detected (%x of %x bytes)...returning "
1170                                            "error status.\n", vha->host_no,
1171                                            cp->device->channel, cp->device->id,
1172                                            cp->device->lun, resid,
1173                                            scsi_bufflen(cp));
1174
1175                                 cp->result = DID_ERROR << 16;
1176                                 break;
1177                         }
1178
1179                         /* Everybody online, looking good... */
1180                         cp->result = DID_OK << 16;
1181                 }
1182                 break;
1183
1184         case CS_DATA_OVERRUN:
1185                 DEBUG2(printk(KERN_INFO
1186                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1187                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1188                     scsi_status));
1189                 DEBUG2(printk(KERN_INFO
1190                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1191                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1192                     cp->cmnd[4], cp->cmnd[5]));
1193                 DEBUG2(printk(KERN_INFO
1194                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1195                     "status!\n",
1196                     cp->serial_number, scsi_bufflen(cp), resid_len));
1197
1198                 cp->result = DID_ERROR << 16;
1199                 break;
1200
1201         case CS_PORT_LOGGED_OUT:
1202         case CS_PORT_CONFIG_CHG:
1203         case CS_PORT_BUSY:
1204         case CS_INCOMPLETE:
1205         case CS_PORT_UNAVAILABLE:
1206                 /*
1207                  * If the port is in a Target Down state, return all I/Os for this
1208                  * target with DID_NO_CONNECT; otherwise queue the I/Os in the
1209                  * retry_queue.
1210                  */
1211                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1212                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1213                     vha->host_no, cp->device->id, cp->device->lun,
1214                     cp->serial_number, comp_status,
1215                     atomic_read(&fcport->state)));
1216
1217                 /*
1218                  * We are going to have the fc class block the rport
1219                  * while we try to recover so instruct the mid layer
1220                  * to requeue until the class decides how to handle this.
1221                  */
1222                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1223                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1224                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1225                 break;
1226
1227         case CS_RESET:
1228                 DEBUG2(printk(KERN_INFO
1229                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1230                     vha->host_no, comp_status, scsi_status));
1231
1232                 cp->result = DID_RESET << 16;
1233                 break;
1234
1235         case CS_ABORTED:
1236                 /*
1237                  * hv2.19.12 - DID_ABORT does not retry the request.  If we
1238                  * aborted this request then abort; otherwise it must be a
1239                  * reset.
1240                  */
1241                 DEBUG2(printk(KERN_INFO
1242                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1243                     vha->host_no, comp_status, scsi_status));
1244
1245                 cp->result = DID_RESET << 16;
1246                 break;
1247
1248         case CS_TIMEOUT:
1249                 /*
1250                  * We are going to have the fc class block the rport
1251                  * while we try to recover so instruct the mid layer
1252                  * to requeue until the class decides how to handle this.
1253                  */
1254                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1255
1256                 if (IS_FWI2_CAPABLE(ha)) {
1257                         DEBUG2(printk(KERN_INFO
1258                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1259                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1260                             cp->device->id, cp->device->lun, comp_status,
1261                             scsi_status));
1262                         break;
1263                 }
1264                 DEBUG2(printk(KERN_INFO
1265                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1266                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1267                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1268                     le16_to_cpu(sts->status_flags)));
1269
1270                 /* Check to see if logout occurred. */
1271                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1272                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1273                 break;
1274
1275         default:
1276                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1277                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1278                 qla_printk(KERN_INFO, ha,
1279                     "Unknown status detected 0x%x-0x%x.\n",
1280                     comp_status, scsi_status);
1281
1282                 cp->result = DID_ERROR << 16;
1283                 break;
1284         }
1285
1286         /* Place command on done queue. */
1287         if (vha->status_srb == NULL)
1288                 qla2x00_sp_compl(ha, sp);
1289 }
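/*
 * Roughly, the error paths above map firmware completion statuses onto
 * mid-layer results: port-down and timeout conditions become
 * DID_TRANSPORT_DISRUPTED so the fc transport class can block the rport
 * and drive recovery, reset and abort conditions become DID_RESET, and
 * anything unrecognized is failed with DID_ERROR.  The command is only
 * completed here when no sense-data continuation is pending; otherwise
 * vha->status_srb keeps the srb alive until qla2x00_status_cont_entry()
 * has copied the remaining sense bytes.
 */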
1290
1291 /**
1292  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1293  * @vha: SCSI driver HA context
1294  * @pkt: Entry pointer
1295  *
1296  * Copies extended sense data into the command tracked by vha->status_srb.
1297  */
1298 static void
1299 qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1300 {
1301         uint8_t         sense_sz = 0;
1302         struct qla_hw_data *ha = vha->hw;
1303         srb_t           *sp = vha->status_srb;
1304         struct scsi_cmnd *cp;
1305
1306         if (sp != NULL && sp->request_sense_length != 0) {
1307                 cp = sp->cmd;
1308                 if (cp == NULL) {
1309                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1310                             "sp=%p.\n", __func__, sp));
1311                         qla_printk(KERN_INFO, ha,
1312                             "cmd is NULL: already returned to OS (sp=%p)\n",
1313                             sp);
1314
1315                         vha->status_srb = NULL;
1316                         return;
1317                 }
1318
1319                 if (sp->request_sense_length > sizeof(pkt->data)) {
1320                         sense_sz = sizeof(pkt->data);
1321                 } else {
1322                         sense_sz = sp->request_sense_length;
1323                 }
1324
1325                 /* Move sense data. */
1326                 if (IS_FWI2_CAPABLE(ha))
1327                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1328                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1329                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1330
1331                 sp->request_sense_ptr += sense_sz;
1332                 sp->request_sense_length -= sense_sz;
1333
1334                 /* Place command on done queue. */
1335                 if (sp->request_sense_length == 0) {
1336                         vha->status_srb = NULL;
1337                         qla2x00_sp_compl(ha, sp);
1338                 }
1339         }
1340 }
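/*
 * Each continuation entry carries at most sizeof(pkt->data) bytes, so a
 * long sense buffer is drained across several entries: every call
 * advances sp->request_sense_ptr, decrements sp->request_sense_length,
 * and completes the command only once the remaining length reaches zero.
 * On FWI2-capable ISPs the payload is byte-swapped with
 * host_to_fcp_swap() before being copied.
 */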
1341
1342 /**
1343  * qla2x00_error_entry() - Process an error entry.
1344  * @vha: SCSI driver HA context
1345  * @pkt: Entry pointer
1346  */
1347 static void
1348 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1349 {
1350         srb_t *sp;
1351         struct qla_hw_data *ha = vha->hw;
1352         struct req_que *req = rsp->req;
1353 #if defined(QL_DEBUG_LEVEL_2)
1354         if (pkt->entry_status & RF_INV_E_ORDER)
1355                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1356         else if (pkt->entry_status & RF_INV_E_COUNT)
1357                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1358         else if (pkt->entry_status & RF_INV_E_PARAM)
1359                 qla_printk(KERN_ERR, ha,
1360                     "%s: Invalid Entry Parameter\n", __func__);
1361         else if (pkt->entry_status & RF_INV_E_TYPE)
1362                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1363         else if (pkt->entry_status & RF_BUSY)
1364                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1365         else
1366                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1367 #endif
1368
1369         /* Validate handle. */
1370         if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1371                 sp = req->outstanding_cmds[pkt->handle];
1372         else
1373                 sp = NULL;
1374
1375         if (sp) {
1376                 /* Free outstanding command slot. */
1377                 req->outstanding_cmds[pkt->handle] = NULL;
1378
1379                 /* Bad payload or header */
1380                 if (pkt->entry_status &
1381                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1382                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1383                         sp->cmd->result = DID_ERROR << 16;
1384                 } else if (pkt->entry_status & RF_BUSY) {
1385                         sp->cmd->result = DID_BUS_BUSY << 16;
1386                 } else {
1387                         sp->cmd->result = DID_ERROR << 16;
1388                 }
1389                 qla2x00_sp_compl(ha, sp);
1390
1391         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1392             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1393                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1394                     vha->host_no));
1395                 qla_printk(KERN_WARNING, ha,
1396                     "Error entry - invalid handle\n");
1397
1398                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1399                 qla2xxx_wake_dpc(vha);
1400         }
1401 }
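/*
 * In short: an error entry with a valid handle fails the matching
 * outstanding command (DID_BUS_BUSY for RF_BUSY, DID_ERROR for the
 * invalid-entry flags and anything else), while an unrecognized handle
 * on a command-type IOCB is treated as firmware/driver disagreement and
 * schedules a full ISP abort through the DPC thread.
 */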
1402
1403 /**
1404  * qla24xx_mbx_completion() - Process mailbox command completions.
1405  * @vha: SCSI driver HA context
1406  * @mb0: Mailbox0 register
1407  */
1408 static void
1409 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1410 {
1411         uint16_t        cnt;
1412         uint16_t __iomem *wptr;
1413         struct qla_hw_data *ha = vha->hw;
1414         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1415
1416         /* Load return mailbox registers. */
1417         ha->flags.mbox_int = 1;
1418         ha->mailbox_out[0] = mb0;
1419         wptr = (uint16_t __iomem *)&reg->mailbox1;
1420
1421         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1422                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1423                 wptr++;
1424         }
1425
1426         if (ha->mcp) {
1427                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1428                     __func__, vha->host_no, ha->mcp->mb[0]));
1429         } else {
1430                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1431                     __func__, vha->host_no));
1432         }
1433 }
1434
1435 /**
1436  * qla24xx_process_response_queue() - Process response queue entries.
1437  * @rsp: Response queue to process
1438  */
1439 void
1440 qla24xx_process_response_queue(struct rsp_que *rsp)
1441 {
1442         struct qla_hw_data *ha = rsp->hw;
1443         struct sts_entry_24xx *pkt;
1444         struct scsi_qla_host *vha;
1445
1446         vha = qla2x00_get_rsp_host(rsp);
1447
1448         if (!vha->flags.online)
1449                 return;
1450
1451         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1452                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1453
1454                 rsp->ring_index++;
1455                 if (rsp->ring_index == rsp->length) {
1456                         rsp->ring_index = 0;
1457                         rsp->ring_ptr = rsp->ring;
1458                 } else {
1459                         rsp->ring_ptr++;
1460                 }
1461
1462                 if (pkt->entry_status != 0) {
1463                         DEBUG3(printk(KERN_INFO
1464                             "scsi(%ld): Process error entry.\n", vha->host_no));
1465
1466                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1467                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1468                         wmb();
1469                         continue;
1470                 }
1471
1472                 switch (pkt->entry_type) {
1473                 case STATUS_TYPE:
1474                         qla2x00_status_entry(vha, rsp, pkt);
1475                         break;
1476                 case STATUS_CONT_TYPE:
1477                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1478                         break;
1479                 case VP_RPT_ID_IOCB_TYPE:
1480                         qla24xx_report_id_acquisition(vha,
1481                             (struct vp_rpt_id_entry_24xx *)pkt);
1482                         break;
1483                 default:
1484                         /* Type Not Supported. */
1485                         DEBUG4(printk(KERN_WARNING
1486                             "scsi(%ld): Received unknown response pkt type %x "
1487                             "entry status=%x.\n",
1488                             vha->host_no, pkt->entry_type, pkt->entry_status));
1489                         break;
1490                 }
1491                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1492                 wmb();
1493         }
1494
1495         /* Adjust ring index */
1496         ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
1497 }
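/*
 * The ring is consumed producer/consumer style: each handled IOCB is
 * stamped RESPONSE_PROCESSED, a wmb() makes that store visible before
 * the walk continues, and the updated consumer index is written back
 * through ha->isp_ops->wrt_rsp_reg so the right out-pointer register is
 * used for base and multiqueue response queues alike.
 */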
1498
1499 static void
1500 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1501 {
1502         int rval;
1503         uint32_t cnt;
1504         struct qla_hw_data *ha = vha->hw;
1505         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1506
1507         if (!IS_QLA25XX(ha))
1508                 return;
1509
1510         rval = QLA_SUCCESS;
1511         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1512         RD_REG_DWORD(&reg->iobase_addr);
1513         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1514         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1515             rval == QLA_SUCCESS; cnt--) {
1516                 if (cnt) {
1517                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1518                         udelay(10);
1519                 } else
1520                         rval = QLA_FUNCTION_TIMEOUT;
1521         }
1522         if (rval == QLA_SUCCESS)
1523                 goto next_test;
1524
1525         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1526         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1527             rval == QLA_SUCCESS; cnt--) {
1528                 if (cnt) {
1529                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1530                         udelay(10);
1531                 } else
1532                         rval = QLA_FUNCTION_TIMEOUT;
1533         }
1534         if (rval != QLA_SUCCESS)
1535                 goto done;
1536
1537 next_test:
1538         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1539                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1540
1541 done:
1542         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1543         RD_REG_DWORD(&reg->iobase_window);
1544 }
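/*
 * A best-effort diagnostic for ISP25xx parts only: the routine writes
 * 0x7C00 to iobase_addr, polls iobase_window for BIT_0 with bounded
 * udelay() loops (first with value 0x0001, then 0x0003), logs the
 * "Additional code -- 0x55AA" message when BIT_3 of iobase_c8 is set,
 * and finally restores iobase_window to zero.
 */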
1545
1546 /**
1547  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and ISP25xx.
1548  * @irq:
1549  * @dev_id: SCSI driver HA context
1550  *
1551  * Called by system whenever the host adapter generates an interrupt.
1552  *
1553  * Returns handled flag.
1554  */
1555 irqreturn_t
1556 qla24xx_intr_handler(int irq, void *dev_id)
1557 {
1558         scsi_qla_host_t *vha;
1559         struct qla_hw_data *ha;
1560         struct device_reg_24xx __iomem *reg;
1561         int             status;
1562         unsigned long   iter;
1563         uint32_t        stat;
1564         uint32_t        hccr;
1565         uint16_t        mb[4];
1566         struct rsp_que *rsp;
1567
1568         rsp = (struct rsp_que *) dev_id;
1569         if (!rsp) {
1570                 printk(KERN_INFO
1571                     "%s(): NULL response queue pointer\n", __func__);
1572                 return IRQ_NONE;
1573         }
1574
1575         ha = rsp->hw;
1576         reg = &ha->iobase->isp24;
1577         status = 0;
1578
1579         spin_lock(&ha->hardware_lock);
1580         vha = qla2x00_get_rsp_host(rsp);
1581         for (iter = 50; iter--; ) {
1582                 stat = RD_REG_DWORD(&reg->host_status);
1583                 if (stat & HSRX_RISC_PAUSED) {
1584                         if (pci_channel_offline(ha->pdev))
1585                                 break;
1586
1587                         hccr = RD_REG_DWORD(&reg->hccr);
1588
1589                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1590                             "Dumping firmware!\n", hccr);
1591
1592                         qla2xxx_check_risc_status(vha);
1593
1594                         ha->isp_ops->fw_dump(vha, 1);
1595                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1596                         break;
1597                 } else if ((stat & HSRX_RISC_INT) == 0)
1598                         break;
1599
1600                 switch (stat & 0xff) {
1601                 case 0x1:
1602                 case 0x2:
1603                 case 0x10:
1604                 case 0x11:
1605                         qla24xx_mbx_completion(vha, MSW(stat));
1606                         status |= MBX_INTERRUPT;
1607
1608                         break;
1609                 case 0x12:
1610                         mb[0] = MSW(stat);
1611                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1612                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1613                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1614                         qla2x00_async_event(vha, rsp, mb);
1615                         break;
1616                 case 0x13:
1617                 case 0x14:
1618                         qla24xx_process_response_queue(rsp);
1619                         break;
1620                 default:
1621                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1622                             "(%d).\n",
1623                             vha->host_no, stat & 0xff));
1624                         break;
1625                 }
1626                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1627                 RD_REG_DWORD_RELAXED(&reg->hccr);
1628         }
1629         spin_unlock(&ha->hardware_lock);
1630
1631         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1632             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1633                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1634                 complete(&ha->mbx_intr_comp);
1635         }
1636
1637         return IRQ_HANDLED;
1638 }
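/*
 * The low byte of host_status picks the dispatch path here and in the
 * MSI-X default handler below: 0x1/0x2/0x10/0x11 are mailbox command
 * completions (mailbox0 arrives in the upper word of the status), 0x12
 * is an asynchronous event whose remaining mailboxes are read from the
 * register file, and 0x13/0x14 signal response queue updates.  A paused
 * RISC instead triggers a firmware dump and schedules an ISP abort.
 */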
1639
1640 static irqreturn_t
1641 qla24xx_msix_rsp_q(int irq, void *dev_id)
1642 {
1643         struct qla_hw_data *ha;
1644         struct rsp_que *rsp;
1645         struct device_reg_24xx __iomem *reg;
1646
1647         rsp = (struct rsp_que *) dev_id;
1648         if (!rsp) {
1649                 printk(KERN_INFO
1650                     "%s(): NULL response queue pointer\n", __func__);
1651                 return IRQ_NONE;
1652         }
1653         ha = rsp->hw;
1654         reg = &ha->iobase->isp24;
1655
1656         spin_lock_irq(&ha->hardware_lock);
1657
1658         qla24xx_process_response_queue(rsp);
1659         WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1660
1661         spin_unlock_irq(&ha->hardware_lock);
1662
1663         return IRQ_HANDLED;
1664 }
1665
1666 static irqreturn_t
1667 qla25xx_msix_rsp_q(int irq, void *dev_id)
1668 {
1669         struct qla_hw_data *ha;
1670         struct rsp_que *rsp;
1671         struct device_reg_24xx __iomem *reg;
1672         uint16_t msix_disabled_hccr = 0;
1673
1674         rsp = (struct rsp_que *) dev_id;
1675         if (!rsp) {
1676                 printk(KERN_INFO
1677                         "%s(): NULL response queue pointer\n", __func__);
1678                 return IRQ_NONE;
1679         }
1680         ha = rsp->hw;
1681         reg = &ha->iobase->isp24;
1682
1683         spin_lock_irq(&ha->hardware_lock);
1684
1685         msix_disabled_hccr = rsp->options;
1686         if (!rsp->id)
1687                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1688         else
1689                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
1690
1691         qla24xx_process_response_queue(rsp);
1692
1693         if (!msix_disabled_hccr)
1694                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1695
1696         spin_unlock_irq(&ha->hardware_lock);
1697
1698         return IRQ_HANDLED;
1699 }
1700
1701 static irqreturn_t
1702 qla24xx_msix_default(int irq, void *dev_id)
1703 {
1704         scsi_qla_host_t *vha;
1705         struct qla_hw_data *ha;
1706         struct rsp_que *rsp;
1707         struct device_reg_24xx __iomem *reg;
1708         int             status;
1709         uint32_t        stat;
1710         uint32_t        hccr;
1711         uint16_t        mb[4];
1712
1713         rsp = (struct rsp_que *) dev_id;
1714         if (!rsp) {
1715                 DEBUG(printk(
1716                     "%s(): NULL response queue pointer\n", __func__));
1717                 return IRQ_NONE;
1718         }
1719         ha = rsp->hw;
1720         reg = &ha->iobase->isp24;
1721         status = 0;
1722
1723         spin_lock_irq(&ha->hardware_lock);
1724         vha = qla2x00_get_rsp_host(rsp);
1725         do {
1726                 stat = RD_REG_DWORD(&reg->host_status);
1727                 if (stat & HSRX_RISC_PAUSED) {
1728                         if (pci_channel_offline(ha->pdev))
1729                                 break;
1730
1731                         hccr = RD_REG_DWORD(&reg->hccr);
1732
1733                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1734                             "Dumping firmware!\n", hccr);
1735
1736                         qla2xxx_check_risc_status(vha);
1737
1738                         ha->isp_ops->fw_dump(vha, 1);
1739                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1740                         break;
1741                 } else if ((stat & HSRX_RISC_INT) == 0)
1742                         break;
1743
1744                 switch (stat & 0xff) {
1745                 case 0x1:
1746                 case 0x2:
1747                 case 0x10:
1748                 case 0x11:
1749                         qla24xx_mbx_completion(vha, MSW(stat));
1750                         status |= MBX_INTERRUPT;
1751
1752                         break;
1753                 case 0x12:
1754                         mb[0] = MSW(stat);
1755                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1756                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1757                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1758                         qla2x00_async_event(vha, rsp, mb);
1759                         break;
1760                 case 0x13:
1761                 case 0x14:
1762                         qla24xx_process_response_queue(rsp);
1763                         break;
1764                 default:
1765                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1766                             "(%d).\n",
1767                             vha->host_no, stat & 0xff));
1768                         break;
1769                 }
1770                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1771         } while (0);
1772         spin_unlock_irq(&ha->hardware_lock);
1773
1774         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1775             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1776                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1777                 complete(&ha->mbx_intr_comp);
1778         }
1779
1780         return IRQ_HANDLED;
1781 }
1782
1783 /* Interrupt handling helpers. */
1784
1785 struct qla_init_msix_entry {
1786         uint16_t entry;
1787         uint16_t index;
1788         const char *name;
1789         irq_handler_t handler;
1790 };
1791
1792 static struct qla_init_msix_entry base_queue = {
1793         .entry = 0,
1794         .index = 0,
1795         .name = "qla2xxx (default)",
1796         .handler = qla24xx_msix_default,
1797 };
1798
1799 static struct qla_init_msix_entry base_rsp_queue = {
1800         .entry = 1,
1801         .index = 1,
1802         .name = "qla2xxx (rsp_q)",
1803         .handler = qla24xx_msix_rsp_q,
1804 };
1805
1806 static struct qla_init_msix_entry multi_rsp_queue = {
1807         .entry = 1,
1808         .index = 1,
1809         .name = "qla2xxx (multi_q)",
1810         .handler = qla25xx_msix_rsp_q,
1811 };
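/*
 * Vector 0 always runs the "default" handler, which fields mailbox
 * completions, async events and base response queue work; vector 1 only
 * services response queues, using the multi_rsp_queue entry when the
 * adapter exposes multiqueue resources and base_rsp_queue otherwise
 * (see qla24xx_enable_msix() below).
 */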
1812
1813 static void
1814 qla24xx_disable_msix(struct qla_hw_data *ha)
1815 {
1816         int i;
1817         struct qla_msix_entry *qentry;
1818
1819         for (i = 0; i < ha->msix_count; i++) {
1820                 qentry = &ha->msix_entries[i];
1821                 if (qentry->have_irq)
1822                         free_irq(qentry->vector, qentry->rsp);
1823         }
1824         pci_disable_msix(ha->pdev);
1825         kfree(ha->msix_entries);
1826         ha->msix_entries = NULL;
1827         ha->flags.msix_enabled = 0;
1828 }
1829
1830 static int
1831 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1832 {
1833         int i, ret;
1834         struct msix_entry *entries;
1835         struct qla_msix_entry *qentry;
1836         struct qla_init_msix_entry *msix_queue;
1837
1838         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1839                                         GFP_KERNEL);
1840         if (!entries)
1841                 return -ENOMEM;
1842
1843         for (i = 0; i < ha->msix_count; i++)
1844                 entries[i].entry = i;
1845
1846         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1847         if (ret) {
1848                 qla_printk(KERN_WARNING, ha,
1849                         "MSI-X: Failed to enable support -- %d/%d\n"
1850                         " Retrying with %d vectors\n", ha->msix_count, ret, ret);
1851                 ha->msix_count = ret;
1852                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1853                 if (ret) {
1854                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1855                                 " support, giving up -- %d/%d\n",
1856                                 ha->msix_count, ret);
1857                         goto msix_out;
1858                 }
1859                 ha->max_queues = ha->msix_count - 1;
1860         }
1861         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1862                                 ha->msix_count, GFP_KERNEL);
1863         if (!ha->msix_entries) {
1864                 ret = -ENOMEM;
1865                 goto msix_out;
1866         }
1867         ha->flags.msix_enabled = 1;
1868
1869         for (i = 0; i < ha->msix_count; i++) {
1870                 qentry = &ha->msix_entries[i];
1871                 qentry->vector = entries[i].vector;
1872                 qentry->entry = entries[i].entry;
1873                 qentry->have_irq = 0;
1874                 qentry->rsp = NULL;
1875         }
1876
1877         /* Enable MSI-X for AENs for queue 0 */
1878         qentry = &ha->msix_entries[0];
1879         ret = request_irq(qentry->vector, base_queue.handler, 0,
1880                                         base_queue.name, rsp);
1881         if (ret) {
1882                 qla_printk(KERN_WARNING, ha,
1883                         "MSI-X: Unable to register handler -- %x/%d.\n",
1884                         qentry->vector, ret);
1885                 qla24xx_disable_msix(ha);
1886                 goto msix_out;
1887         }
1888         qentry->have_irq = 1;
1889         qentry->rsp = rsp;
1890
1891         /* Enable MSI-X vector for response queue update for queue 0 */
1892         if (ha->max_queues > 1 && ha->mqiobase) {
1893                 ha->mqenable = 1;
1894                 msix_queue = &multi_rsp_queue;
1895                 qla_printk(KERN_INFO, ha,
1896                                 "MQ enabled, Number of Queue Resources: %d\n",
1897                                 ha->max_queues);
1898         } else {
1899                 ha->mqenable = 0;
1900                 msix_queue = &base_rsp_queue;
1901         }
1902
1903         qentry = &ha->msix_entries[1];
1904         ret = request_irq(qentry->vector, msix_queue->handler, 0,
1905                                                 msix_queue->name, rsp);
1906         if (ret) {
1907                 qla_printk(KERN_WARNING, ha,
1908                         "MSI-X: Unable to register handler -- %x/%d.\n",
1909                         qentry->vector, ret);
1910                 qla24xx_disable_msix(ha);
1911                 ha->mqenable = 0;
1912                 goto msix_out;
1913         }
1914         qentry->have_irq = 1;
1915         qentry->rsp = rsp;
1916
1917 msix_out:
1918         kfree(entries);
1919         return ret;
1920 }
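/*
 * In outline: a scratch msix_entry array is offered to pci_enable_msix();
 * when the PCI core can only grant fewer vectors it returns the number
 * available, which is retried once before giving up, trimming
 * ha->max_queues to match.  Each vector that registers successfully
 * records have_irq and its rsp pointer so qla24xx_disable_msix() can
 * later free exactly what was requested.
 */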
1921
1922 int
1923 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1924 {
1925         int ret;
1926         device_reg_t __iomem *reg = ha->iobase;
1927
1928         /* If possible, enable MSI-X. */
1929         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1930                 goto skip_msix;
1931
1932         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1933                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1934                 DEBUG2(qla_printk(KERN_WARNING, ha,
1935                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1936                         ha->pdev->revision, ha->fw_attributes));
1937
1938                 goto skip_msix;
1939         }
1940
1941         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1942             (ha->pdev->subsystem_device == 0x7040 ||
1943                 ha->pdev->subsystem_device == 0x7041 ||
1944                 ha->pdev->subsystem_device == 0x1705)) {
1945                 DEBUG2(qla_printk(KERN_WARNING, ha,
1946                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1947                     ha->pdev->subsystem_vendor,
1948                     ha->pdev->subsystem_device));
1949
1950                 goto skip_msi;
1951         }
1952
1953         ret = qla24xx_enable_msix(ha, rsp);
1954         if (!ret) {
1955                 DEBUG2(qla_printk(KERN_INFO, ha,
1956                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1957                     ha->fw_attributes));
1958                 goto clear_risc_ints;
1959         }
1960         qla_printk(KERN_WARNING, ha,
1961             "MSI-X: Falling back to INTa mode -- %d.\n", ret);
1962 skip_msix:
1963
1964         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1965                 goto skip_msi;
1966
1967         ret = pci_enable_msi(ha->pdev);
1968         if (!ret) {
1969                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1970                 ha->flags.msi_enabled = 1;
1971         }
1972 skip_msi:
1973
1974         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1975             IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
1976         if (ret) {
1977                 qla_printk(KERN_WARNING, ha,
1978                     "Failed to reserve interrupt %d; already in use.\n",
1979                     ha->pdev->irq);
1980                 goto fail;
1981         }
1982         ha->flags.inta_enabled = 1;
1983 clear_risc_ints:
1984
1985         spin_lock_irq(&ha->hardware_lock);
1986         if (IS_FWI2_CAPABLE(ha)) {
1987                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1988                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1989         } else {
1990                 WRT_REG_WORD(&reg->isp.semaphore, 0);
1991                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1992                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1993         }
1994         spin_unlock_irq(&ha->hardware_lock);
1995
1996 fail:
1997         return ret;
1998 }
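/*
 * Interrupt setup degrades gracefully: MSI-X is attempted first on the
 * adapters that support it, plain MSI comes next, and a shared INTx
 * line is the final fallback (the listed HP subsystem IDs skip straight
 * to INTx).  Whichever mode ends up active, any latched host/RISC
 * interrupt bits are cleared under the hardware lock before returning.
 */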
1999
2000 void
2001 qla2x00_free_irqs(scsi_qla_host_t *vha)
2002 {
2003         struct qla_hw_data *ha = vha->hw;
2004         struct rsp_que *rsp = ha->rsp_q_map[0];
2005
2006         if (ha->flags.msix_enabled)
2007                 qla24xx_disable_msix(ha);
2008         else if (ha->flags.inta_enabled) {
2009                 free_irq(ha->pdev->irq, rsp);
2010                 pci_disable_msi(ha->pdev);
2011         }
2012 }
2013
2014 static struct scsi_qla_host *
2015 qla2x00_get_rsp_host(struct rsp_que *rsp)
2016 {
2017         srb_t *sp;
2018         struct qla_hw_data *ha = rsp->hw;
2019         struct scsi_qla_host *vha = NULL;
2020         struct sts_entry_24xx *pkt;
2021         struct req_que *req;
2022
2023         if (rsp->id) {
2024                 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2025                 req = rsp->req;
2026                 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2027                         sp = req->outstanding_cmds[pkt->handle];
2028                         if (sp)
2029                                 vha = sp->fcport->vha;
2030                 }
2031         }
2032         if (!vha)
2033                 /* Handle it in the base queue. */
2034                 vha = pci_get_drvdata(ha->pdev);
2035
2036         return vha;
2037 }
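/*
 * For a non-base response queue (rsp->id != 0) the owning vha is looked
 * up through the fcport of the outstanding command named by the IOCB
 * handle; when that lookup fails, or for the base queue, the physical
 * host stashed in the PCI driver data is used instead.
 */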
2038
2039 int qla25xx_request_irq(struct rsp_que *rsp)
2040 {
2041         struct qla_hw_data *ha = rsp->hw;
2042         struct qla_init_msix_entry *intr = &multi_rsp_queue;
2043         struct qla_msix_entry *msix = rsp->msix;
2044         int ret;
2045
2046         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2047         if (ret) {
2048                 qla_printk(KERN_WARNING, ha,
2049                         "MSI-X: Unable to register handler -- %x/%d.\n",
2050                         msix->vector, ret);
2051                 return ret;
2052         }
2053         msix->have_irq = 1;
2054         msix->rsp = rsp;
2055         return ret;
2056 }
2057
2058 void
2059 qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2060 {
2061         device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2062         WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2063 }
2064
2065 void
2066 qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2067 {
2068         device_reg_t __iomem *reg = (void *) ha->iobase;
2069         WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2070 }
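/*
 * These two helpers back the ha->isp_ops->wrt_rsp_reg hook used by
 * qla24xx_process_response_queue(): ISP25xx multiqueue hardware gives
 * each response queue its own register page at mqiobase + QLA_QUE_PAGE *
 * id, while the ISP24xx writes the single rsp_q_out register in the
 * regular I/O space.
 */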
2071