drivers/scsi/qla2xxx/qla_isr.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19
20 /**
21  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
22  * @irq: interrupt number
23  * @dev_id: SCSI driver HA context
24  *
25  * Called by system whenever the host adapter generates an interrupt.
26  *
27  * Returns handled flag.
28  */
29 irqreturn_t
30 qla2100_intr_handler(int irq, void *dev_id)
31 {
32         scsi_qla_host_t *vha;
33         struct qla_hw_data *ha;
34         struct device_reg_2xxx __iomem *reg;
35         int             status;
36         unsigned long   iter;
37         uint16_t        hccr;
38         uint16_t        mb[4];
39         struct rsp_que *rsp;
40         unsigned long   flags;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock_irqsave(&ha->hardware_lock, flags);
54         vha = pci_get_drvdata(ha->pdev);
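        /*
         * Bounded polling: service at most 50 interrupt conditions per call,
         * exiting early once the RISC interrupt bit is no longer asserted.
         */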
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
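                /*
                 * Semaphore bit 0 set: the mailbox registers carry the event
                 * data (command completion or asynchronous event); otherwise
                 * the interrupt is for response-queue entries.
                 */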
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
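                        /*
                         * Mailbox0 values 0x4000-0x7fff indicate a mailbox
                         * command completion; 0x8000-0xbfff indicate an
                         * asynchronous event.
                         */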
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock_irqrestore(&ha->hardware_lock, flags);
106
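        /* Wake up the mailbox command waiter, if one is pending. */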
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: interrupt number
119  * @dev_id: SCSI driver HA context
120  *
121  * Called by system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137         unsigned long   flags;
138
139         rsp = (struct rsp_que *) dev_id;
140         if (!rsp) {
141                 printk(KERN_INFO
142                     "%s(): NULL response queue pointer\n", __func__);
143                 return (IRQ_NONE);
144         }
145
146         ha = rsp->hw;
147         reg = &ha->iobase->isp;
148         status = 0;
149
150         spin_lock_irqsave(&ha->hardware_lock, flags);
151         vha = pci_get_drvdata(ha->pdev);
152         for (iter = 50; iter--; ) {
153                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
154                 if (stat & HSR_RISC_PAUSED) {
155                         if (pci_channel_offline(ha->pdev))
156                                 break;
157
158                         hccr = RD_REG_WORD(&reg->hccr);
159                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
160                                 qla_printk(KERN_INFO, ha, "Parity error -- "
161                                     "HCCR=%x, Dumping firmware!\n", hccr);
162                         else
163                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
164                                     "HCCR=%x, Dumping firmware!\n", hccr);
165
166                         /*
167                          * Issue a "HARD" reset in order for the RISC
168                          * interrupt bit to be cleared.  Schedule a big
169                          * hammer to get out of the RISC PAUSED state.
170                          */
171                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
172                         RD_REG_WORD(&reg->hccr);
173
174                         ha->isp_ops->fw_dump(vha, 1);
175                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
176                         break;
177                 } else if ((stat & HSR_RISC_INT) == 0)
178                         break;
179
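                /*
                 * The low byte of the host status register identifies the
                 * interrupt source; remaining data comes from the upper word
                 * and/or the mailbox registers, as each case requires.
                 */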
180                 switch (stat & 0xff) {
181                 case 0x1:
182                 case 0x2:
183                 case 0x10:
184                 case 0x11:
185                         qla2x00_mbx_completion(vha, MSW(stat));
186                         status |= MBX_INTERRUPT;
187
188                         /* Release mailbox registers. */
189                         WRT_REG_WORD(&reg->semaphore, 0);
190                         break;
191                 case 0x12:
192                         mb[0] = MSW(stat);
193                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
194                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
195                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
196                         qla2x00_async_event(vha, rsp, mb);
197                         break;
198                 case 0x13:
199                         qla2x00_process_response_queue(rsp);
200                         break;
201                 case 0x15:
202                         mb[0] = MBA_CMPLT_1_16BIT;
203                         mb[1] = MSW(stat);
204                         qla2x00_async_event(vha, rsp, mb);
205                         break;
206                 case 0x16:
207                         mb[0] = MBA_SCSI_COMPLETION;
208                         mb[1] = MSW(stat);
209                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
210                         qla2x00_async_event(vha, rsp, mb);
211                         break;
212                 default:
213                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
214                             "(%d).\n",
215                             vha->host_no, stat & 0xff));
216                         break;
217                 }
218                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
219                 RD_REG_WORD_RELAXED(&reg->hccr);
220         }
221         spin_unlock_irqrestore(&ha->hardware_lock, flags);
222
223         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
224             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
225                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
226                 complete(&ha->mbx_intr_comp);
227         }
228
229         return (IRQ_HANDLED);
230 }
231
232 /**
233  * qla2x00_mbx_completion() - Process mailbox command completions.
234  * @vha: SCSI driver HA context
235  * @mb0: Mailbox0 register
236  */
237 static void
238 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
239 {
240         uint16_t        cnt;
241         uint16_t __iomem *wptr;
242         struct qla_hw_data *ha = vha->hw;
243         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
244
245         /* Load return mailbox registers. */
246         ha->flags.mbox_int = 1;
247         ha->mailbox_out[0] = mb0;
248         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
249
250         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
251                 if (IS_QLA2200(ha) && cnt == 8)
252                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
253                 if (cnt == 4 || cnt == 5)
254                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
255                 else
256                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
257
258                 wptr++;
259         }
260
261         if (ha->mcp) {
262                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
263                     __func__, vha->host_no, ha->mcp->mb[0]));
264         } else {
265                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
266                     __func__, vha->host_no));
267         }
268 }
269
270 static void
271 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
272 {
273         static char *event[] =
274                 { "Complete", "Request Notification", "Time Extension" };
275         int rval;
276         struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
277         uint16_t __iomem *wptr;
278         uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
279
280         /* Seed data -- mailbox1 -> mailbox7. */
281         wptr = (uint16_t __iomem *)&reg24->mailbox1;
282         for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
283                 mb[cnt] = RD_REG_WORD(wptr);
284
285         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
286             "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
287             event[aen & 0xff],
288             mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
289
290         /* Acknowledgement needed? [Notify && non-zero timeout]. */
291         timeout = (descr >> 8) & 0xf;
292         if (aen != MBA_IDC_NOTIFY || !timeout)
293                 return;
294
295         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
296             "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
297
298         rval = qla2x00_post_idc_ack_work(vha, mb);
299         if (rval != QLA_SUCCESS)
300                 qla_printk(KERN_WARNING, vha->hw,
301                     "IDC failed to post ACK.\n");
302 }
303
304 /**
305  * qla2x00_async_event() - Process asynchronous events.
306  * @vha: SCSI driver HA context
307  * @mb: Mailbox registers (0 - 3)
308  */
309 void
310 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
311 {
312 #define LS_UNKNOWN      2
313         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
314         char            *link_speed;
315         uint16_t        handle_cnt;
316         uint16_t        cnt;
317         uint32_t        handles[5];
318         struct qla_hw_data *ha = vha->hw;
319         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
320         uint32_t        rscn_entry, host_pid;
321         uint8_t         rscn_queue_index;
322         unsigned long   flags;
323
324         /* Setup to process RIO completion. */
325         handle_cnt = 0;
326         if (IS_QLA81XX(ha))
327                 goto skip_rio;
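        /*
         * Collapse the various reduced-interrupt-operation (RIO) completion
         * formats into a handle list and normalize mb[0] to
         * MBA_SCSI_COMPLETION for the processing below.
         */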
328         switch (mb[0]) {
329         case MBA_SCSI_COMPLETION:
330                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
331                 handle_cnt = 1;
332                 break;
333         case MBA_CMPLT_1_16BIT:
334                 handles[0] = mb[1];
335                 handle_cnt = 1;
336                 mb[0] = MBA_SCSI_COMPLETION;
337                 break;
338         case MBA_CMPLT_2_16BIT:
339                 handles[0] = mb[1];
340                 handles[1] = mb[2];
341                 handle_cnt = 2;
342                 mb[0] = MBA_SCSI_COMPLETION;
343                 break;
344         case MBA_CMPLT_3_16BIT:
345                 handles[0] = mb[1];
346                 handles[1] = mb[2];
347                 handles[2] = mb[3];
348                 handle_cnt = 3;
349                 mb[0] = MBA_SCSI_COMPLETION;
350                 break;
351         case MBA_CMPLT_4_16BIT:
352                 handles[0] = mb[1];
353                 handles[1] = mb[2];
354                 handles[2] = mb[3];
355                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
356                 handle_cnt = 4;
357                 mb[0] = MBA_SCSI_COMPLETION;
358                 break;
359         case MBA_CMPLT_5_16BIT:
360                 handles[0] = mb[1];
361                 handles[1] = mb[2];
362                 handles[2] = mb[3];
363                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
364                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
365                 handle_cnt = 5;
366                 mb[0] = MBA_SCSI_COMPLETION;
367                 break;
368         case MBA_CMPLT_2_32BIT:
369                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
370                 handles[1] = le32_to_cpu(
371                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
372                     RD_MAILBOX_REG(ha, reg, 6));
373                 handle_cnt = 2;
374                 mb[0] = MBA_SCSI_COMPLETION;
375                 break;
376         default:
377                 break;
378         }
379 skip_rio:
380         switch (mb[0]) {
381         case MBA_SCSI_COMPLETION:       /* Fast Post */
382                 if (!vha->flags.online)
383                         break;
384
385                 for (cnt = 0; cnt < handle_cnt; cnt++)
386                         qla2x00_process_completed_request(vha, rsp->req,
387                                 handles[cnt]);
388                 break;
389
390         case MBA_RESET:                 /* Reset */
391                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
392                         vha->host_no));
393
394                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
395                 break;
396
397         case MBA_SYSTEM_ERR:            /* System Error */
398                 qla_printk(KERN_INFO, ha,
399                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
400                     mb[1], mb[2], mb[3]);
401
402                 ha->isp_ops->fw_dump(vha, 1);
403
404                 if (IS_FWI2_CAPABLE(ha)) {
405                         if (mb[1] == 0 && mb[2] == 0) {
406                                 qla_printk(KERN_ERR, ha,
407                                     "Unrecoverable Hardware Error: adapter "
408                                     "marked OFFLINE!\n");
409                                 vha->flags.online = 0;
410                         } else
411                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
412                 } else if (mb[1] == 0) {
413                         qla_printk(KERN_INFO, ha,
414                             "Unrecoverable Hardware Error: adapter marked "
415                             "OFFLINE!\n");
416                         vha->flags.online = 0;
417                 } else
418                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
419                 break;
420
421         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
422                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
423                     vha->host_no));
424                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
425
426                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
427                 break;
428
429         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
430                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
431                     vha->host_no));
432                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
433
434                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
435                 break;
436
437         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
438                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
439                     vha->host_no));
440                 break;
441
442         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
443                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
444                     mb[1]));
445                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
446
447                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
448                         atomic_set(&vha->loop_state, LOOP_DOWN);
449                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
450                         qla2x00_mark_all_devices_lost(vha, 1);
451                 }
452
453                 if (vha->vp_idx) {
454                         atomic_set(&vha->vp_state, VP_FAILED);
455                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
456                 }
457
458                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
459                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
460
461                 vha->flags.management_server_logged_in = 0;
462                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
463                 break;
464
465         case MBA_LOOP_UP:               /* Loop Up Event */
466                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
467                         link_speed = link_speeds[0];
468                         ha->link_data_rate = PORT_SPEED_1GB;
469                 } else {
470                         link_speed = link_speeds[LS_UNKNOWN];
471                         if (mb[1] < 5)
472                                 link_speed = link_speeds[mb[1]];
473                         else if (mb[1] == 0x13)
474                                 link_speed = link_speeds[5];
475                         ha->link_data_rate = mb[1];
476                 }
477
478                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
479                     vha->host_no, link_speed));
480                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
481                     link_speed);
482
483                 vha->flags.management_server_logged_in = 0;
484                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
485                 break;
486
487         case MBA_LOOP_DOWN:             /* Loop Down Event */
488                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
489                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
490                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
491                     mb[1], mb[2], mb[3]);
492
493                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
494                         atomic_set(&vha->loop_state, LOOP_DOWN);
495                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
496                         vha->device_flags |= DFLG_NO_CABLE;
497                         qla2x00_mark_all_devices_lost(vha, 1);
498                 }
499
500                 if (vha->vp_idx) {
501                         atomic_set(&vha->vp_state, VP_FAILED);
502                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
503                 }
504
505                 vha->flags.management_server_logged_in = 0;
506                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
507                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
508                 break;
509
510         case MBA_LIP_RESET:             /* LIP reset occurred */
511                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
512                     vha->host_no, mb[1]));
513                 qla_printk(KERN_INFO, ha,
514                     "LIP reset occurred (%x).\n", mb[1]);
515
516                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
517                         atomic_set(&vha->loop_state, LOOP_DOWN);
518                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
519                         qla2x00_mark_all_devices_lost(vha, 1);
520                 }
521
522                 if (vha->vp_idx) {
523                         atomic_set(&vha->vp_state, VP_FAILED);
524                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
525                 }
526
527                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
528
529                 ha->operating_mode = LOOP;
530                 vha->flags.management_server_logged_in = 0;
531                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
532                 break;
533
534         /* case MBA_DCBX_COMPLETE: */
535         case MBA_POINT_TO_POINT:        /* Point-to-Point */
536                 if (IS_QLA2100(ha))
537                         break;
538
539                 if (IS_QLA81XX(ha))
540                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
541                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
542                 else
543                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
544                             "received.\n", vha->host_no));
545
546                 /*
547                  * Until there's a transition from loop down to loop up, treat
548                  * this as loop down only.
549                  */
550                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
551                         atomic_set(&vha->loop_state, LOOP_DOWN);
552                         if (!atomic_read(&vha->loop_down_timer))
553                                 atomic_set(&vha->loop_down_timer,
554                                     LOOP_DOWN_TIME);
555                         qla2x00_mark_all_devices_lost(vha, 1);
556                 }
557
558                 if (vha->vp_idx) {
559                         atomic_set(&vha->vp_state, VP_FAILED);
560                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
561                 }
562
563                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
564                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
565
566                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
567                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
568
569                 ha->flags.gpsc_supported = 1;
570                 vha->flags.management_server_logged_in = 0;
571                 break;
572
573         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
574                 if (IS_QLA2100(ha))
575                         break;
576
577                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
578                     "received.\n",
579                     vha->host_no));
580                 qla_printk(KERN_INFO, ha,
581                     "Configuration change detected: value=%x.\n", mb[1]);
582
583                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
584                         atomic_set(&vha->loop_state, LOOP_DOWN);
585                         if (!atomic_read(&vha->loop_down_timer))
586                                 atomic_set(&vha->loop_down_timer,
587                                     LOOP_DOWN_TIME);
588                         qla2x00_mark_all_devices_lost(vha, 1);
589                 }
590
591                 if (vha->vp_idx) {
592                         atomic_set(&vha->vp_state, VP_FAILED);
593                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
594                 }
595
596                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
597                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
598                 break;
599
600         case MBA_PORT_UPDATE:           /* Port database update */
601                 /*
602                  * Handle only global and vn-port update events
603                  *
604                  * Relevant inputs:
605                  * mb[1] = N_Port handle of changed port
606                  * OR 0xffff for global event
607                  * mb[2] = New login state
608                  * 7 = Port logged out
609                  * mb[3] = LSB is vp_idx, 0xff = all vps
610                  *
611                  * Skip processing if:
612                  *       Event is global, vp_idx is NOT all vps,
613                  *           vp_idx does not match
614                  *       Event is not global, vp_idx does not match
615                  */
616                 if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff)
617                         || (mb[1] != 0xffff)) {
618                         if (vha->vp_idx != (mb[3] & 0xff))
619                                 break;
620                 }
621
622                 /* Global event -- port logout or port unavailable. */
623                 if (mb[1] == 0xffff && mb[2] == 0x7) {
624                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
625                             vha->host_no));
626                         DEBUG(printk(KERN_INFO
627                             "scsi(%ld): Port unavailable %04x %04x %04x.\n",
628                             vha->host_no, mb[1], mb[2], mb[3]));
629
630                         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
631                                 atomic_set(&vha->loop_state, LOOP_DOWN);
632                                 atomic_set(&vha->loop_down_timer,
633                                     LOOP_DOWN_TIME);
634                                 vha->device_flags |= DFLG_NO_CABLE;
635                                 qla2x00_mark_all_devices_lost(vha, 1);
636                         }
637
638                         if (vha->vp_idx) {
639                                 atomic_set(&vha->vp_state, VP_FAILED);
640                                 fc_vport_set_state(vha->fc_vport,
641                                     FC_VPORT_FAILED);
642                                 qla2x00_mark_all_devices_lost(vha, 1);
643                         }
644
645                         vha->flags.management_server_logged_in = 0;
646                         ha->link_data_rate = PORT_SPEED_UNKNOWN;
647                         break;
648                 }
649
650                 /*
651                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
652                  * event etc. earlier indicating loop is down) then process
653                  * it.  Otherwise ignore it and wait for RSCN to come in.
654                  */
655                 atomic_set(&vha->loop_down_timer, 0);
656                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
657                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
658                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
659                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
660                             mb[2], mb[3]));
661                         break;
662                 }
663
664                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
665                     vha->host_no));
666                 DEBUG(printk(KERN_INFO
667                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
668                     vha->host_no, mb[1], mb[2], mb[3]));
669
670                 /*
671                  * Mark all devices as missing so we will login again.
672                  */
673                 atomic_set(&vha->loop_state, LOOP_UP);
674
675                 qla2x00_mark_all_devices_lost(vha, 1);
676
677                 vha->flags.rscn_queue_overflow = 1;
678
679                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
680                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
681                 break;
682
683         case MBA_RSCN_UPDATE:           /* State Change Registration */
684                 /* Check if the Vport has issued a SCR */
685                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
686                         break;
687                 /* Only handle SCNs for our Vport index. */
688                 if (vha->vp_idx != (mb[3] & 0xff))
689                         break;
690                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
691                     vha->host_no));
692                 DEBUG(printk(KERN_INFO
693                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
694                     vha->host_no, mb[1], mb[2], mb[3]));
695
696                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
697                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
698                                 | vha->d_id.b.al_pa;
699                 if (rscn_entry == host_pid) {
700                         DEBUG(printk(KERN_INFO
701                             "scsi(%ld): Ignoring RSCN update to local host "
702                             "port ID (%06x)\n",
703                             vha->host_no, host_pid));
704                         break;
705                 }
706
707                 /* Ignore reserved bits from RSCN-payload. */
708                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
709                 rscn_queue_index = vha->rscn_in_ptr + 1;
710                 if (rscn_queue_index == MAX_RSCN_COUNT)
711                         rscn_queue_index = 0;
712                 if (rscn_queue_index != vha->rscn_out_ptr) {
713                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
714                         vha->rscn_in_ptr = rscn_queue_index;
715                 } else {
716                         vha->flags.rscn_queue_overflow = 1;
717                 }
718
719                 atomic_set(&vha->loop_state, LOOP_UPDATE);
720                 atomic_set(&vha->loop_down_timer, 0);
721                 vha->flags.management_server_logged_in = 0;
722
723                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
724                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
725                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
726                 break;
727
728         /* case MBA_RIO_RESPONSE: */
729         case MBA_ZIO_RESPONSE:
730                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
731                     vha->host_no));
732
733                 if (IS_FWI2_CAPABLE(ha))
734                         qla24xx_process_response_queue(vha, rsp);
735                 else
736                         qla2x00_process_response_queue(rsp);
737                 break;
738
739         case MBA_DISCARD_RND_FRAME:
740                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
741                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
742                 break;
743
744         case MBA_TRACE_NOTIFICATION:
745                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
746                 vha->host_no, mb[1], mb[2]));
747                 break;
748
749         case MBA_ISP84XX_ALERT:
750                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
751                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
752
753                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
754                 switch (mb[1]) {
755                 case A84_PANIC_RECOVERY:
756                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
757                             "%04x %04x\n", mb[2], mb[3]);
758                         break;
759                 case A84_OP_LOGIN_COMPLETE:
760                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
761                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
762                             "firmware version %x\n", ha->cs84xx->op_fw_version));
763                         break;
764                 case A84_DIAG_LOGIN_COMPLETE:
765                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
766                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
767                             "diagnostic firmware version %x\n",
768                             ha->cs84xx->diag_fw_version));
769                         break;
770                 case A84_GOLD_LOGIN_COMPLETE:
771                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
772                         ha->cs84xx->fw_update = 1;
773                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
774                             "firmware version %x\n",
775                             ha->cs84xx->gold_fw_version));
776                         break;
777                 default:
778                         qla_printk(KERN_ERR, ha,
779                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
780                             mb[1], mb[2], mb[3]);
781                 }
782                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
783                 break;
784         case MBA_DCBX_START:
785                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
786                     vha->host_no, mb[1], mb[2], mb[3]));
787                 break;
788         case MBA_DCBX_PARAM_UPDATE:
789                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
790                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
791                 break;
792         case MBA_FCF_CONF_ERR:
793                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
794                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
795                 break;
796         case MBA_IDC_COMPLETE:
797         case MBA_IDC_NOTIFY:
798         case MBA_IDC_TIME_EXT:
799                 qla81xx_idc_event(vha, mb[0], mb[1]);
800                 break;
801         }
802
803         if (!vha->vp_idx && ha->num_vhosts)
804                 qla2x00_alert_all_vps(rsp, mb);
805 }
806
807 static void
808 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
809 {
810         fc_port_t *fcport = data;
811         struct scsi_qla_host *vha = fcport->vha;
812         struct qla_hw_data *ha = vha->hw;
813         struct req_que *req = NULL;
814
815         if (!ql2xqfulltracking)
816                 return;
817
818         req = vha->req;
819         if (!req)
820                 return;
821         if (req->max_q_depth <= sdev->queue_depth)
822                 return;
823
824         if (sdev->ordered_tags)
825                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
826                     sdev->queue_depth + 1);
827         else
828                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
829                     sdev->queue_depth + 1);
830
831         fcport->last_ramp_up = jiffies;
832
833         DEBUG2(qla_printk(KERN_INFO, ha,
834             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
835             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
836             sdev->queue_depth));
837 }
838
839 static void
840 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
841 {
842         fc_port_t *fcport = data;
843
844         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
845                 return;
846
847         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
848             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
849             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
850             sdev->queue_depth));
851 }
852
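/*
 * Ramp the queue depth back up only if at least ql2xqfullrampup seconds have
 * passed since the last ramp-up and the last queue-full event on this port.
 */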
853 static inline void
854 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
855                                                                 srb_t *sp)
856 {
857         fc_port_t *fcport;
858         struct scsi_device *sdev;
859
860         if (!ql2xqfulltracking)
861                 return;
862
863         sdev = sp->cmd->device;
864         if (sdev->queue_depth >= req->max_q_depth)
865                 return;
866
867         fcport = sp->fcport;
868         if (time_before(jiffies,
869             fcport->last_ramp_up + ql2xqfullrampup * HZ))
870                 return;
871         if (time_before(jiffies,
872             fcport->last_queue_full + ql2xqfullrampup * HZ))
873                 return;
874
875         starget_for_each_device(sdev->sdev_target, fcport,
876             qla2x00_adjust_sdev_qdepth_up);
877 }
878
879 /**
880  * qla2x00_process_completed_request() - Process a Fast Post response.
881  * @vha: SCSI driver HA context
882  * @index: SRB index
883  */
884 static void
885 qla2x00_process_completed_request(struct scsi_qla_host *vha,
886                                 struct req_que *req, uint32_t index)
887 {
888         srb_t *sp;
889         struct qla_hw_data *ha = vha->hw;
890
891         /* Validate handle. */
892         if (index >= MAX_OUTSTANDING_COMMANDS) {
893                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
894                     vha->host_no, index));
895                 qla_printk(KERN_WARNING, ha,
896                     "Invalid SCSI completion handle %d.\n", index);
897
898                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
899                 return;
900         }
901
902         sp = req->outstanding_cmds[index];
903         if (sp) {
904                 /* Free outstanding command slot. */
905                 req->outstanding_cmds[index] = NULL;
906
907                 /* Save ISP completion status */
908                 sp->cmd->result = DID_OK << 16;
909
910                 qla2x00_ramp_up_queue_depth(vha, req, sp);
911                 qla2x00_sp_compl(ha, sp);
912         } else {
913                 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
914                         " handle(%d)\n", vha->host_no, req->id, index));
915                 qla_printk(KERN_WARNING, ha,
916                     "Invalid ISP SCSI completion handle\n");
917
918                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
919         }
920 }
921
922 /**
923  * qla2x00_process_response_queue() - Process response queue entries.
924  * @rsp: response queue to process
925  */
926 void
927 qla2x00_process_response_queue(struct rsp_que *rsp)
928 {
929         struct scsi_qla_host *vha;
930         struct qla_hw_data *ha = rsp->hw;
931         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
932         sts_entry_t     *pkt;
933         uint16_t        handle_cnt;
934         uint16_t        cnt;
935
936         vha = pci_get_drvdata(ha->pdev);
937
938         if (!vha->flags.online)
939                 return;
940
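        /* Walk the response ring until an already-processed entry is found. */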
941         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
942                 pkt = (sts_entry_t *)rsp->ring_ptr;
943
944                 rsp->ring_index++;
945                 if (rsp->ring_index == rsp->length) {
946                         rsp->ring_index = 0;
947                         rsp->ring_ptr = rsp->ring;
948                 } else {
949                         rsp->ring_ptr++;
950                 }
951
952                 if (pkt->entry_status != 0) {
953                         DEBUG3(printk(KERN_INFO
954                             "scsi(%ld): Process error entry.\n", vha->host_no));
955
956                         qla2x00_error_entry(vha, rsp, pkt);
957                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
958                         wmb();
959                         continue;
960                 }
961
962                 switch (pkt->entry_type) {
963                 case STATUS_TYPE:
964                         qla2x00_status_entry(vha, rsp, pkt);
965                         break;
966                 case STATUS_TYPE_21:
967                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
968                         for (cnt = 0; cnt < handle_cnt; cnt++) {
969                                 qla2x00_process_completed_request(vha, rsp->req,
970                                     ((sts21_entry_t *)pkt)->handle[cnt]);
971                         }
972                         break;
973                 case STATUS_TYPE_22:
974                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
975                         for (cnt = 0; cnt < handle_cnt; cnt++) {
976                                 qla2x00_process_completed_request(vha, rsp->req,
977                                     ((sts22_entry_t *)pkt)->handle[cnt]);
978                         }
979                         break;
980                 case STATUS_CONT_TYPE:
981                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
982                         break;
983                 default:
984                         /* Type Not Supported. */
985                         DEBUG4(printk(KERN_WARNING
986                             "scsi(%ld): Received unknown response pkt type %x "
987                             "entry status=%x.\n",
988                             vha->host_no, pkt->entry_type, pkt->entry_status));
989                         break;
990                 }
991                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
992                 wmb();
993         }
994
995         /* Adjust ring index */
996         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
997 }
998
999 static inline void
1000 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
1001         struct rsp_que *rsp)
1002 {
1003         struct scsi_cmnd *cp = sp->cmd;
1004
1005         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1006                 sense_len = SCSI_SENSE_BUFFERSIZE;
1007
1008         sp->request_sense_length = sense_len;
1009         sp->request_sense_ptr = cp->sense_buffer;
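        /*
         * Copy at most 32 bytes of sense data now; if more remains, save the
         * SRB in rsp->status_srb so status-continuation entries can deliver
         * the rest.
         */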
1010         if (sp->request_sense_length > 32)
1011                 sense_len = 32;
1012
1013         memcpy(cp->sense_buffer, sense_data, sense_len);
1014
1015         sp->request_sense_ptr += sense_len;
1016         sp->request_sense_length -= sense_len;
1017         if (sp->request_sense_length != 0)
1018                 rsp->status_srb = sp;
1019
1020         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
1021             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
1022             cp->device->channel, cp->device->id, cp->device->lun, cp,
1023             cp->serial_number));
1024         if (sense_len)
1025                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1026 }
1027
1028 /**
1029  * qla2x00_status_entry() - Process a Status IOCB entry.
1030  * @vha: SCSI driver HA context
1031  * @pkt: Entry pointer
1032  */
1033 static void
1034 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1035 {
1036         srb_t           *sp;
1037         fc_port_t       *fcport;
1038         struct scsi_cmnd *cp;
1039         sts_entry_t *sts;
1040         struct sts_entry_24xx *sts24;
1041         uint16_t        comp_status;
1042         uint16_t        scsi_status;
1043         uint8_t         lscsi_status;
1044         int32_t         resid;
1045         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
1046         uint8_t         *rsp_info, *sense_data;
1047         struct qla_hw_data *ha = vha->hw;
1048         uint32_t handle;
1049         uint16_t que;
1050         struct req_que *req;
1051
1052         sts = (sts_entry_t *) pkt;
1053         sts24 = (struct sts_entry_24xx *) pkt;
1054         if (IS_FWI2_CAPABLE(ha)) {
1055                 comp_status = le16_to_cpu(sts24->comp_status);
1056                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1057         } else {
1058                 comp_status = le16_to_cpu(sts->comp_status);
1059                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1060         }
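        /*
         * The IOCB handle packs the request-queue number in the upper word
         * and the outstanding-command index in the lower word.
         */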
1061         handle = (uint32_t) LSW(sts->handle);
1062         que = MSW(sts->handle);
1063         req = ha->req_q_map[que];
1064         /* Fast path completion. */
1065         if (comp_status == CS_COMPLETE && scsi_status == 0) {
1066                 qla2x00_process_completed_request(vha, req, handle);
1067
1068                 return;
1069         }
1070
1071         /* Validate handle. */
1072         if (handle < MAX_OUTSTANDING_COMMANDS) {
1073                 sp = req->outstanding_cmds[handle];
1074                 req->outstanding_cmds[handle] = NULL;
1075         } else
1076                 sp = NULL;
1077
1078         if (sp == NULL) {
1079                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
1080                     vha->host_no));
1081                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
1082
1083                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1084                 qla2xxx_wake_dpc(vha);
1085                 return;
1086         }
1087         cp = sp->cmd;
1088         if (cp == NULL) {
1089                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
1090                     "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
1091                 qla_printk(KERN_WARNING, ha,
1092                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
1093
1094                 return;
1095         }
1096
1097         lscsi_status = scsi_status & STATUS_MASK;
1098
1099         fcport = sp->fcport;
1100
1101         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
1102         if (IS_FWI2_CAPABLE(ha)) {
1103                 sense_len = le32_to_cpu(sts24->sense_len);
1104                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1105                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1106                 fw_resid_len = le32_to_cpu(sts24->residual_len);
1107                 rsp_info = sts24->data;
1108                 sense_data = sts24->data;
1109                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1110         } else {
1111                 sense_len = le16_to_cpu(sts->req_sense_length);
1112                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1113                 resid_len = le32_to_cpu(sts->residual_length);
1114                 rsp_info = sts->rsp_info;
1115                 sense_data = sts->req_sense_data;
1116         }
1117
1118         /* Check for any FCP transport errors. */
1119         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1120                 /* Sense data lies beyond any FCP RESPONSE data. */
1121                 if (IS_FWI2_CAPABLE(ha))
1122                         sense_data += rsp_info_len;
1123                 if (rsp_info_len > 3 && rsp_info[3]) {
1124                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1125                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1126                             "retrying command\n", vha->host_no,
1127                             cp->device->channel, cp->device->id,
1128                             cp->device->lun, rsp_info_len, rsp_info[0],
1129                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1130                             rsp_info[5], rsp_info[6], rsp_info[7]));
1131
1132                         cp->result = DID_BUS_BUSY << 16;
1133                         qla2x00_sp_compl(ha, sp);
1134                         return;
1135                 }
1136         }
1137
1138         /* Check for overrun. */
1139         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1140             scsi_status & SS_RESIDUAL_OVER)
1141                 comp_status = CS_DATA_OVERRUN;
1142
1143         /*
1144          * Based on the host and SCSI status, generate the Linux status code.
1145          */
1146         switch (comp_status) {
1147         case CS_COMPLETE:
1148         case CS_QUEUE_FULL:
1149                 if (scsi_status == 0) {
1150                         cp->result = DID_OK << 16;
1151                         break;
1152                 }
1153                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1154                         resid = resid_len;
1155                         scsi_set_resid(cp, resid);
1156
1157                         if (!lscsi_status &&
1158                             ((unsigned)(scsi_bufflen(cp) - resid) <
1159                              cp->underflow)) {
1160                                 qla_printk(KERN_INFO, ha,
1161                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1162                                            "detected (%x of %x bytes)...returning "
1163                                            "error status.\n", vha->host_no,
1164                                            cp->device->channel, cp->device->id,
1165                                            cp->device->lun, resid,
1166                                            scsi_bufflen(cp));
1167
1168                                 cp->result = DID_ERROR << 16;
1169                                 break;
1170                         }
1171                 }
1172                 cp->result = DID_OK << 16 | lscsi_status;
1173
1174                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1175                         DEBUG2(printk(KERN_INFO
1176                             "scsi(%ld): QUEUE FULL status detected "
1177                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1178                             scsi_status));
1179
1180                         /* Adjust queue depth for all luns on the port. */
1181                         if (!ql2xqfulltracking)
1182                                 break;
1183                         fcport->last_queue_full = jiffies;
1184                         starget_for_each_device(cp->device->sdev_target,
1185                             fcport, qla2x00_adjust_sdev_qdepth_down);
1186                         break;
1187                 }
1188                 if (lscsi_status != SS_CHECK_CONDITION)
1189                         break;
1190
1191                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1192                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1193                         break;
1194
1195                 qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1196                 break;
1197
1198         case CS_DATA_UNDERRUN:
1199                 resid = resid_len;
1200                 /* Use F/W calculated residual length. */
1201                 if (IS_FWI2_CAPABLE(ha)) {
1202                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1203                                 lscsi_status = 0;
1204                         } else if (resid != fw_resid_len) {
1205                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1206                                 lscsi_status = 0;
1207                         }
1208                         resid = fw_resid_len;
1209                 }
1210
1211                 if (scsi_status & SS_RESIDUAL_UNDER) {
1212                         scsi_set_resid(cp, resid);
1213                 } else {
1214                         DEBUG2(printk(KERN_INFO
1215                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1216                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1217                             "os_underflow=0x%x\n", vha->host_no,
1218                             cp->device->id, cp->device->lun, comp_status,
1219                             scsi_status, resid_len, resid, cp->cmnd[0],
1220                             cp->underflow));
1221
1222                 }
1223
1224                 /*
1225                  * Check to see if SCSI Status is non zero. If so report SCSI
1226                  * Status.
1227                  */
1228                 if (lscsi_status != 0) {
1229                         cp->result = DID_OK << 16 | lscsi_status;
1230
1231                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1232                                 DEBUG2(printk(KERN_INFO
1233                                     "scsi(%ld): QUEUE FULL status detected "
1234                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1235                                     scsi_status));
1236
1237                                 /*
1238                                  * Adjust queue depth for all luns on the
1239                                  * port.
1240                                  */
1241                                 if (!ql2xqfulltracking)
1242                                         break;
1243                                 fcport->last_queue_full = jiffies;
1244                                 starget_for_each_device(
1245                                     cp->device->sdev_target, fcport,
1246                                     qla2x00_adjust_sdev_qdepth_down);
1247                                 break;
1248                         }
1249                         if (lscsi_status != SS_CHECK_CONDITION)
1250                                 break;
1251
1252                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1253                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1254                                 break;
1255
1256                         qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
1257                 } else {
1258                         /*
1259                          * If RISC reports underrun and target does not report
1260                          * it then we must have a lost frame, so tell upper
1261                          * layer to retry it by reporting an error.
1262                          */
1263                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1264                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1265                                               "frame(s) detected (%x of %x bytes)..."
1266                                               "retrying command.\n",
1267                                         vha->host_no, cp->device->channel,
1268                                         cp->device->id, cp->device->lun, resid,
1269                                         scsi_bufflen(cp)));
1270
1271                                 scsi_set_resid(cp, resid);
1272                                 cp->result = DID_ERROR << 16;
1273                                 break;
1274                         }
1275
1276                         /* Handle mid-layer underflow */
1277                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1278                             cp->underflow) {
1279                                 qla_printk(KERN_INFO, ha,
1280                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1281                                            "detected (%x of %x bytes)...returning "
1282                                            "error status.\n", vha->host_no,
1283                                            cp->device->channel, cp->device->id,
1284                                            cp->device->lun, resid,
1285                                            scsi_bufflen(cp));
1286
1287                                 cp->result = DID_ERROR << 16;
1288                                 break;
1289                         }
1290
1291                         /* Everybody online, looking good... */
1292                         cp->result = DID_OK << 16;
1293                 }
1294                 break;
1295
1296         case CS_DATA_OVERRUN:
1297                 DEBUG2(printk(KERN_INFO
1298                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1299                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1300                     scsi_status));
1301                 DEBUG2(printk(KERN_INFO
1302                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1303                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1304                     cp->cmnd[4], cp->cmnd[5]));
1305                 DEBUG2(printk(KERN_INFO
1306                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1307                     "status!\n",
1308                     cp->serial_number, scsi_bufflen(cp), resid_len));
1309
1310                 cp->result = DID_ERROR << 16;
1311                 break;
1312
1313         case CS_PORT_LOGGED_OUT:
1314         case CS_PORT_CONFIG_CHG:
1315         case CS_PORT_BUSY:
1316         case CS_INCOMPLETE:
1317         case CS_PORT_UNAVAILABLE:
1318                 /*
1319                  * The firmware reports that the port is logged out, busy,
1320                  * reconfigured, or otherwise unavailable; treat all of these
1321                  * as a loss of connectivity to the target.
1322                  */
1323                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1324                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1325                     vha->host_no, cp->device->id, cp->device->lun,
1326                     cp->serial_number, comp_status,
1327                     atomic_read(&fcport->state)));
1328
1329                 /*
1330                  * We are going to have the fc class block the rport
1331                  * while we try to recover so instruct the mid layer
1332                  * to requeue until the class decides how to handle this.
1333                  */
1334                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1335                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1336                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1337                 break;
1338
1339         case CS_RESET:
1340                 DEBUG2(printk(KERN_INFO
1341                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1342                     vha->host_no, comp_status, scsi_status));
1343
1344                 cp->result = DID_RESET << 16;
1345                 break;
1346
1347         case CS_ABORTED:
1348                 /*
1349                  * hv2.19.12 - The midlayer does not retry DID_ABORT.  We did
1350                  * not abort this command ourselves, so the abort must have
1351                  * come from a reset; return DID_RESET so the command is retried.
1352                  */
1353                 DEBUG2(printk(KERN_INFO
1354                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1355                     vha->host_no, comp_status, scsi_status));
1356
1357                 cp->result = DID_RESET << 16;
1358                 break;
1359
1360         case CS_TIMEOUT:
1361                 /*
1362                  * We are going to have the fc class block the rport
1363                  * while we try to recover so instruct the mid layer
1364                  * to requeue until the class decides how to handle this.
1365                  */
1366                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1367
1368                 if (IS_FWI2_CAPABLE(ha)) {
1369                         DEBUG2(printk(KERN_INFO
1370                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1371                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1372                             cp->device->id, cp->device->lun, comp_status,
1373                             scsi_status));
1374                         break;
1375                 }
1376                 DEBUG2(printk(KERN_INFO
1377                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1378                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1379                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1380                     le16_to_cpu(sts->status_flags)));
1381
1382                 /* Check to see if logout occurred. */
1383                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1384                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1385                 break;
1386
1387         default:
1388                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1389                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1390                 qla_printk(KERN_INFO, ha,
1391                     "Unknown status detected 0x%x-0x%x.\n",
1392                     comp_status, scsi_status);
1393
1394                 cp->result = DID_ERROR << 16;
1395                 break;
1396         }
1397
1398         /* Place command on done queue. */
1399         if (rsp->status_srb == NULL)
1400                 qla2x00_sp_compl(ha, sp);
1401 }
1402
1403 /**
1404  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1405  * @rsp: response queue
1406  * @pkt: Entry pointer
1407  *
1408  * Extended sense data.
1409  */
1410 static void
1411 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1412 {
1413         uint8_t         sense_sz = 0;
1414         struct qla_hw_data *ha = rsp->hw;
1415         srb_t           *sp = rsp->status_srb;
1416         struct scsi_cmnd *cp;
1417
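             /*
              * rsp->status_srb refers to a command whose sense data did not fit
              * in the original status IOCB; each Status Continuation entry
              * carries the next chunk of that command's sense buffer.
              */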
1418         if (sp != NULL && sp->request_sense_length != 0) {
1419                 cp = sp->cmd;
1420                 if (cp == NULL) {
1421                         DEBUG2(printk("%s(): Cmd already returned to OS "
1422                             "sp=%p.\n", __func__, sp));
1423                         qla_printk(KERN_INFO, ha,
1424                             "cmd is NULL: already returned to OS (sp=%p)\n",
1425                             sp);
1426
1427                         rsp->status_srb = NULL;
1428                         return;
1429                 }
1430
1431                 if (sp->request_sense_length > sizeof(pkt->data)) {
1432                         sense_sz = sizeof(pkt->data);
1433                 } else {
1434                         sense_sz = sp->request_sense_length;
1435                 }
1436
1437                 /* Move sense data. */
1438                 if (IS_FWI2_CAPABLE(ha))
1439                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1440                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1441                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1442
1443                 sp->request_sense_ptr += sense_sz;
1444                 sp->request_sense_length -= sense_sz;
1445
1446                 /* Place command on done queue. */
1447                 if (sp->request_sense_length == 0) {
1448                         rsp->status_srb = NULL;
1449                         qla2x00_sp_compl(ha, sp);
1450                 }
1451         }
1452 }
1453
1454 /**
1455  * qla2x00_error_entry() - Process an error entry.
1456  * @vha: SCSI driver HA context
1457  * @pkt: Entry pointer
1458  */
1459 static void
1460 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1461 {
1462         srb_t *sp;
1463         struct qla_hw_data *ha = vha->hw;
1464         uint32_t handle = LSW(pkt->handle);
1465         uint16_t que = MSW(pkt->handle);
1466         struct req_que *req = ha->req_q_map[que];
1467 #if defined(QL_DEBUG_LEVEL_2)
1468         if (pkt->entry_status & RF_INV_E_ORDER)
1469                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1470         else if (pkt->entry_status & RF_INV_E_COUNT)
1471                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1472         else if (pkt->entry_status & RF_INV_E_PARAM)
1473                 qla_printk(KERN_ERR, ha,
1474                     "%s: Invalid Entry Parameter\n", __func__);
1475         else if (pkt->entry_status & RF_INV_E_TYPE)
1476                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1477         else if (pkt->entry_status & RF_BUSY)
1478                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1479         else
1480                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1481 #endif
1482
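             /*
              * The IOCB handle packs the request queue number in its upper 16
              * bits and the index into that queue's outstanding_cmds[] array
              * in its lower 16 bits (see the MSW()/LSW() split above).
              */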
1483         /* Validate handle. */
1484         if (handle < MAX_OUTSTANDING_COMMANDS)
1485                 sp = req->outstanding_cmds[handle];
1486         else
1487                 sp = NULL;
1488
1489         if (sp) {
1490                 /* Free outstanding command slot. */
1491                 req->outstanding_cmds[handle] = NULL;
1492
1493                 /* Bad payload or header */
1494                 if (pkt->entry_status &
1495                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1496                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1497                         sp->cmd->result = DID_ERROR << 16;
1498                 } else if (pkt->entry_status & RF_BUSY) {
1499                         sp->cmd->result = DID_BUS_BUSY << 16;
1500                 } else {
1501                         sp->cmd->result = DID_ERROR << 16;
1502                 }
1503                 qla2x00_sp_compl(ha, sp);
1504
1505         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1506             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1507                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1508                     vha->host_no));
1509                 qla_printk(KERN_WARNING, ha,
1510                     "Error entry - invalid handle\n");
1511
1512                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1513                 qla2xxx_wake_dpc(vha);
1514         }
1515 }
1516
1517 /**
1518  * qla24xx_mbx_completion() - Process mailbox command completions.
1519  * @vha: SCSI driver HA context
1520  * @mb0: Mailbox0 register
1521  */
1522 static void
1523 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1524 {
1525         uint16_t        cnt;
1526         uint16_t __iomem *wptr;
1527         struct qla_hw_data *ha = vha->hw;
1528         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1529
1530         /* Load return mailbox registers. */
1531         ha->flags.mbox_int = 1;
1532         ha->mailbox_out[0] = mb0;
1533         wptr = (uint16_t __iomem *)&reg->mailbox1;
1534
1535         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1536                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1537                 wptr++;
1538         }
1539
1540         if (ha->mcp) {
1541                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1542                     __func__, vha->host_no, ha->mcp->mb[0]));
1543         } else {
1544                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1545                     __func__, vha->host_no));
1546         }
1547 }
1548
1549 /**
1550  * qla24xx_process_response_queue() - Process response queue entries.
1551  * @vha: SCSI driver HA context
1552  */
1553 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1554         struct rsp_que *rsp)
1555 {
1556         struct sts_entry_24xx *pkt;
1557
1558         if (!vha->flags.online)
1559                 return;
1560
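             /*
              * Walk the response ring until an entry already stamped
              * RESPONSE_PROCESSED is reached; every entry handled below is
              * re-stamped so it is not picked up again on the next pass.
              */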
1561         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1562                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1563
1564                 rsp->ring_index++;
1565                 if (rsp->ring_index == rsp->length) {
1566                         rsp->ring_index = 0;
1567                         rsp->ring_ptr = rsp->ring;
1568                 } else {
1569                         rsp->ring_ptr++;
1570                 }
1571
1572                 if (pkt->entry_status != 0) {
1573                         DEBUG3(printk(KERN_INFO
1574                             "scsi(%ld): Process error entry.\n", vha->host_no));
1575
1576                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1577                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1578                         wmb();
1579                         continue;
1580                 }
1581
1582                 switch (pkt->entry_type) {
1583                 case STATUS_TYPE:
1584                         qla2x00_status_entry(vha, rsp, pkt);
1585                         break;
1586                 case STATUS_CONT_TYPE:
1587                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1588                         break;
1589                 case VP_RPT_ID_IOCB_TYPE:
1590                         qla24xx_report_id_acquisition(vha,
1591                             (struct vp_rpt_id_entry_24xx *)pkt);
1592                         break;
1593                 default:
1594                         /* Type Not Supported. */
1595                         DEBUG4(printk(KERN_WARNING
1596                             "scsi(%ld): Received unknown response pkt type %x "
1597                             "entry status=%x.\n",
1598                             vha->host_no, pkt->entry_type, pkt->entry_status));
1599                         break;
1600                 }
1601                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1602                 wmb();
1603         }
1604
1605         /* Adjust ring index */
1606         WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
1607 }
1608
1609 static void
1610 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1611 {
1612         int rval;
1613         uint32_t cnt;
1614         struct qla_hw_data *ha = vha->hw;
1615         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1616
1617         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
1618                 return;
1619
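             /*
              * Point the I/O base address register at window 0x7C00, then poll
              * the window register until BIT_0 is set, giving up after a
              * bounded number of retries.
              */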
1620         rval = QLA_SUCCESS;
1621         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1622         RD_REG_DWORD(&reg->iobase_addr);
1623         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1624         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1625             rval == QLA_SUCCESS; cnt--) {
1626                 if (cnt) {
1627                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1628                         udelay(10);
1629                 } else
1630                         rval = QLA_FUNCTION_TIMEOUT;
1631         }
1632         if (rval == QLA_SUCCESS)
1633                 goto next_test;
1634
1635         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1636         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1637             rval == QLA_SUCCESS; cnt--) {
1638                 if (cnt) {
1639                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1640                         udelay(10);
1641                 } else
1642                         rval = QLA_FUNCTION_TIMEOUT;
1643         }
1644         if (rval != QLA_SUCCESS)
1645                 goto done;
1646
1647 next_test:
1648         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1649                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1650
1651 done:
1652         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1653         RD_REG_DWORD(&reg->iobase_window);
1654 }
1655
1656 /**
1657  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and later adapters.
1658  * @irq:
1659  * @dev_id: SCSI driver HA context
1660  *
1661  * Called by system whenever the host adapter generates an interrupt.
1662  *
1663  * Returns handled flag.
1664  */
1665 irqreturn_t
1666 qla24xx_intr_handler(int irq, void *dev_id)
1667 {
1668         scsi_qla_host_t *vha;
1669         struct qla_hw_data *ha;
1670         struct device_reg_24xx __iomem *reg;
1671         int             status;
1672         unsigned long   iter;
1673         uint32_t        stat;
1674         uint32_t        hccr;
1675         uint16_t        mb[4];
1676         struct rsp_que *rsp;
1677         unsigned long   flags;
1678
1679         rsp = (struct rsp_que *) dev_id;
1680         if (!rsp) {
1681                 printk(KERN_INFO
1682                     "%s(): NULL response queue pointer\n", __func__);
1683                 return IRQ_NONE;
1684         }
1685
1686         ha = rsp->hw;
1687         reg = &ha->iobase->isp24;
1688         status = 0;
1689
1690         spin_lock_irqsave(&ha->hardware_lock, flags);
1691         vha = pci_get_drvdata(ha->pdev);
1692         for (iter = 50; iter--; ) {
1693                 stat = RD_REG_DWORD(&reg->host_status);
1694                 if (stat & HSRX_RISC_PAUSED) {
1695                         if (pci_channel_offline(ha->pdev))
1696                                 break;
1697
1698                         hccr = RD_REG_DWORD(&reg->hccr);
1699
1700                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1701                             "Dumping firmware!\n", hccr);
1702
1703                         qla2xxx_check_risc_status(vha);
1704
1705                         ha->isp_ops->fw_dump(vha, 1);
1706                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1707                         break;
1708                 } else if ((stat & HSRX_RISC_INT) == 0)
1709                         break;
1710
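                     /*
                      * The low byte of the host status register identifies the
                      * interrupt source: 0x1/0x2/0x10/0x11 mailbox command
                      * completion, 0x12 asynchronous event, 0x13/0x14 response
                      * queue update.
                      */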
1711                 switch (stat & 0xff) {
1712                 case 0x1:
1713                 case 0x2:
1714                 case 0x10:
1715                 case 0x11:
1716                         qla24xx_mbx_completion(vha, MSW(stat));
1717                         status |= MBX_INTERRUPT;
1718
1719                         break;
1720                 case 0x12:
1721                         mb[0] = MSW(stat);
1722                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1723                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1724                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1725                         qla2x00_async_event(vha, rsp, mb);
1726                         break;
1727                 case 0x13:
1728                 case 0x14:
1729                         qla24xx_process_response_queue(vha, rsp);
1730                         break;
1731                 default:
1732                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1733                             "(%d).\n",
1734                             vha->host_no, stat & 0xff));
1735                         break;
1736                 }
1737                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1738                 RD_REG_DWORD_RELAXED(&reg->hccr);
1739         }
1740         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1741
1742         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1743             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1744                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1745                 complete(&ha->mbx_intr_comp);
1746         }
1747
1748         return IRQ_HANDLED;
1749 }
1750
1751 static irqreturn_t
1752 qla24xx_msix_rsp_q(int irq, void *dev_id)
1753 {
1754         struct qla_hw_data *ha;
1755         struct rsp_que *rsp;
1756         struct device_reg_24xx __iomem *reg;
1757         struct scsi_qla_host *vha;
1758
1759         rsp = (struct rsp_que *) dev_id;
1760         if (!rsp) {
1761                 printk(KERN_INFO
1762                 "%s(): NULL response queue pointer\n", __func__);
1763                 return IRQ_NONE;
1764         }
1765         ha = rsp->hw;
1766         reg = &ha->iobase->isp24;
1767
1768         spin_lock_irq(&ha->hardware_lock);
1769
1770         vha = qla25xx_get_host(rsp);
1771         qla24xx_process_response_queue(vha, rsp);
1772         if (!ha->mqenable) {
1773                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1774                 RD_REG_DWORD_RELAXED(&reg->hccr);
1775         }
1776         spin_unlock_irq(&ha->hardware_lock);
1777
1778         return IRQ_HANDLED;
1779 }
1780
1781 static irqreturn_t
1782 qla25xx_msix_rsp_q(int irq, void *dev_id)
1783 {
1784         struct qla_hw_data *ha;
1785         struct rsp_que *rsp;
1786
1787         rsp = (struct rsp_que *) dev_id;
1788         if (!rsp) {
1789                 printk(KERN_INFO
1790                         "%s(): NULL response queue pointer\n", __func__);
1791                 return IRQ_NONE;
1792         }
1793         ha = rsp->hw;
1794
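             /*
              * Defer processing of this response queue to a work item bound to
              * the CPU matching the queue's index (rsp->id - 1).
              */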
1795         queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
1796
1797         return IRQ_HANDLED;
1798 }
1799
1800 static irqreturn_t
1801 qla24xx_msix_default(int irq, void *dev_id)
1802 {
1803         scsi_qla_host_t *vha;
1804         struct qla_hw_data *ha;
1805         struct rsp_que *rsp;
1806         struct device_reg_24xx __iomem *reg;
1807         int             status;
1808         uint32_t        stat;
1809         uint32_t        hccr;
1810         uint16_t        mb[4];
1811
1812         rsp = (struct rsp_que *) dev_id;
1813         if (!rsp) {
1814                 DEBUG(printk(
1815                 "%s(): NULL response queue pointer\n", __func__));
1816                 return IRQ_NONE;
1817         }
1818         ha = rsp->hw;
1819         reg = &ha->iobase->isp24;
1820         status = 0;
1821
1822         spin_lock_irq(&ha->hardware_lock);
1823         vha = pci_get_drvdata(ha->pdev);
1824         do {
1825                 stat = RD_REG_DWORD(&reg->host_status);
1826                 if (stat & HSRX_RISC_PAUSED) {
1827                         if (pci_channel_offline(ha->pdev))
1828                                 break;
1829
1830                         hccr = RD_REG_DWORD(&reg->hccr);
1831
1832                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1833                             "Dumping firmware!\n", hccr);
1834
1835                         qla2xxx_check_risc_status(vha);
1836
1837                         ha->isp_ops->fw_dump(vha, 1);
1838                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1839                         break;
1840                 } else if ((stat & HSRX_RISC_INT) == 0)
1841                         break;
1842
1843                 switch (stat & 0xff) {
1844                 case 0x1:
1845                 case 0x2:
1846                 case 0x10:
1847                 case 0x11:
1848                         qla24xx_mbx_completion(vha, MSW(stat));
1849                         status |= MBX_INTERRUPT;
1850
1851                         break;
1852                 case 0x12:
1853                         mb[0] = MSW(stat);
1854                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1855                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1856                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1857                         qla2x00_async_event(vha, rsp, mb);
1858                         break;
1859                 case 0x13:
1860                 case 0x14:
1861                         qla24xx_process_response_queue(vha, rsp);
1862                         break;
1863                 default:
1864                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1865                             "(%d).\n",
1866                             vha->host_no, stat & 0xff));
1867                         break;
1868                 }
1869                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1870         } while (0);
1871         spin_unlock_irq(&ha->hardware_lock);
1872
1873         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1874             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1875                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1876                 complete(&ha->mbx_intr_comp);
1877         }
1878
1879         return IRQ_HANDLED;
1880 }
1881
1882 /* Interrupt handling helpers. */
1883
1884 struct qla_init_msix_entry {
1885         const char *name;
1886         irq_handler_t handler;
1887 };
1888
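     /*
      * MSI-X vector roles: entry 0 is the default handler (mailbox completions,
      * async events and the base response queue), entry 1 handles the base
      * response queue directly, and entry 2 is used by qla25xx_request_irq()
      * for additional response queues in multi-queue mode.
      */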
1889 static struct qla_init_msix_entry msix_entries[3] = {
1890         { "qla2xxx (default)", qla24xx_msix_default },
1891         { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1892         { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
1893 };
1894
1895 static void
1896 qla24xx_disable_msix(struct qla_hw_data *ha)
1897 {
1898         int i;
1899         struct qla_msix_entry *qentry;
1900
1901         for (i = 0; i < ha->msix_count; i++) {
1902                 qentry = &ha->msix_entries[i];
1903                 if (qentry->have_irq)
1904                         free_irq(qentry->vector, qentry->rsp);
1905         }
1906         pci_disable_msix(ha->pdev);
1907         kfree(ha->msix_entries);
1908         ha->msix_entries = NULL;
1909         ha->flags.msix_enabled = 0;
1910 }
1911
1912 static int
1913 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1914 {
1915 #define MIN_MSIX_COUNT  2
1916         int i, ret;
1917         struct msix_entry *entries;
1918         struct qla_msix_entry *qentry;
1919
1920         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1921                                         GFP_KERNEL);
1922         if (!entries)
1923                 return -ENOMEM;
1924
1925         for (i = 0; i < ha->msix_count; i++)
1926                 entries[i].entry = i;
1927
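             /*
              * pci_enable_msix() returns 0 on success, a negative errno on
              * failure, or a positive count of vectors that could have been
              * allocated; the retry path below re-requests with that count.
              */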
1928         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1929         if (ret) {
1930                 if (ret < MIN_MSIX_COUNT)
1931                         goto msix_failed;
1932
1933                 qla_printk(KERN_WARNING, ha,
1934                         "MSI-X: Failed to enable support -- %d/%d\n"
1935                         " Retrying with %d vectors.\n", ha->msix_count, ret, ret);
1936                 ha->msix_count = ret;
1937                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1938                 if (ret) {
1939 msix_failed:
1940                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1941                                 " support, giving up -- %d/%d\n",
1942                                 ha->msix_count, ret);
1943                         goto msix_out;
1944                 }
1945                 ha->max_rsp_queues = ha->msix_count - 1;
1946         }
1947         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1948                                 ha->msix_count, GFP_KERNEL);
1949         if (!ha->msix_entries) {
1950                 ret = -ENOMEM;
1951                 goto msix_out;
1952         }
1953         ha->flags.msix_enabled = 1;
1954
1955         for (i = 0; i < ha->msix_count; i++) {
1956                 qentry = &ha->msix_entries[i];
1957                 qentry->vector = entries[i].vector;
1958                 qentry->entry = entries[i].entry;
1959                 qentry->have_irq = 0;
1960                 qentry->rsp = NULL;
1961         }
1962
1963         /* Enable MSI-X vectors for the base queue */
1964         for (i = 0; i < 2; i++) {
1965                 qentry = &ha->msix_entries[i];
1966                 ret = request_irq(qentry->vector, msix_entries[i].handler,
1967                                         0, msix_entries[i].name, rsp);
1968                 if (ret) {
1969                         qla_printk(KERN_WARNING, ha,
1970                         "MSI-X: Unable to register handler -- %x/%d.\n",
1971                         qentry->vector, ret);
1972                         qla24xx_disable_msix(ha);
1973                         ha->mqenable = 0;
1974                         goto msix_out;
1975                 }
1976                 qentry->have_irq = 1;
1977                 qentry->rsp = rsp;
1978                 rsp->msix = qentry;
1979         }
1980
1981         /* Enable multi-queue mode when additional req/rsp queues are available. */
1982         if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
1983                 ha->mqenable = 1;
1984
1985 msix_out:
1986         kfree(entries);
1987         return ret;
1988 }
1989
1990 int
1991 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1992 {
1993         int ret;
1994         device_reg_t __iomem *reg = ha->iobase;
1995
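             /*
              * Interrupt setup order: try MSI-X first, fall back to MSI on
              * capable adapters, and finally to a shared legacy INTx line.
              */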
1996         /* If possible, enable MSI-X. */
1997         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
1998             !IS_QLA8432(ha) && !IS_QLA8001(ha))
1999                 goto skip_msix;
2000
2001         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2002                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2003                 DEBUG2(qla_printk(KERN_WARNING, ha,
2004                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2005                         ha->pdev->revision, ha->fw_attributes));
2006
2007                 goto skip_msix;
2008         }
2009
2010         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2011             (ha->pdev->subsystem_device == 0x7040 ||
2012                 ha->pdev->subsystem_device == 0x7041 ||
2013                 ha->pdev->subsystem_device == 0x1705)) {
2014                 DEBUG2(qla_printk(KERN_WARNING, ha,
2015                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
2016                     ha->pdev->subsystem_vendor,
2017                     ha->pdev->subsystem_device));
2018
2019                 goto skip_msi;
2020         }
2021
2022         ret = qla24xx_enable_msix(ha, rsp);
2023         if (!ret) {
2024                 DEBUG2(qla_printk(KERN_INFO, ha,
2025                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
2026                     ha->fw_attributes));
2027                 goto clear_risc_ints;
2028         }
2029         qla_printk(KERN_WARNING, ha,
2030             "MSI-X: Falling back to INTa mode -- %d.\n", ret);
2031 skip_msix:
2032
2033         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2034             !IS_QLA8001(ha))
2035                 goto skip_msi;
2036
2037         ret = pci_enable_msi(ha->pdev);
2038         if (!ret) {
2039                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2040                 ha->flags.msi_enabled = 1;
2041         }
2042 skip_msi:
2043
2044         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2045             IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
2046         if (ret) {
2047                 qla_printk(KERN_WARNING, ha,
2048                     "Failed to reserve interrupt %d -- already in use.\n",
2049                     ha->pdev->irq);
2050                 goto fail;
2051         }
2052         ha->flags.inta_enabled = 1;
2053 clear_risc_ints:
2054
2055         /*
2056          * FIXME: Noted that 8014s were being dropped during NK testing.
2057          * Timing deltas during MSI-X/INTa transitions?
2058          */
2059         if (IS_QLA81XX(ha))
2060                 goto fail;
2061         spin_lock_irq(&ha->hardware_lock);
2062         if (IS_FWI2_CAPABLE(ha)) {
2063                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2064                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2065         } else {
2066                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2067                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2068                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2069         }
2070         spin_unlock_irq(&ha->hardware_lock);
2071
2072 fail:
2073         return ret;
2074 }
2075
2076 void
2077 qla2x00_free_irqs(scsi_qla_host_t *vha)
2078 {
2079         struct qla_hw_data *ha = vha->hw;
2080         struct rsp_que *rsp = ha->rsp_q_map[0];
2081
2082         if (ha->flags.msix_enabled)
2083                 qla24xx_disable_msix(ha);
2084         else if (ha->flags.inta_enabled) {
2085                 free_irq(ha->pdev->irq, rsp);
2086                 pci_disable_msi(ha->pdev);
2087         }
2088 }
2089
2090
2091 int qla25xx_request_irq(struct rsp_que *rsp)
2092 {
2093         struct qla_hw_data *ha = rsp->hw;
2094         struct qla_init_msix_entry *intr = &msix_entries[2];
2095         struct qla_msix_entry *msix = rsp->msix;
2096         int ret;
2097
2098         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2099         if (ret) {
2100                 qla_printk(KERN_WARNING, ha,
2101                         "MSI-X: Unable to register handler -- %x/%d.\n",
2102                         msix->vector, ret);
2103                 return ret;
2104         }
2105         msix->have_irq = 1;
2106         msix->rsp = rsp;
2107         return ret;
2108 }
2109
2110 struct scsi_qla_host *
2111 qla25xx_get_host(struct rsp_que *rsp)
2112 {
2113         srb_t *sp;
2114         struct qla_hw_data *ha = rsp->hw;
2115         struct scsi_qla_host *vha = NULL;
2116         struct sts_entry_24xx *pkt;
2117         struct req_que *req;
2118         uint16_t que;
2119         uint32_t handle;
2120
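             /*
              * Recover the issuing host from the handle of the status entry at
              * the head of the response ring; fall back to the base (physical)
              * host when the handle does not map to an outstanding command.
              */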
2121         pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2122         que = MSW(pkt->handle);
2123         handle = (uint32_t) LSW(pkt->handle);
2124         req = ha->req_q_map[que];
2125         if (handle < MAX_OUTSTANDING_COMMANDS) {
2126                 sp = req->outstanding_cmds[handle];
2127                 if (sp)
2128                         return sp->fcport->vha;
2129         }
2130
2133         vha = pci_get_drvdata(ha->pdev);
2134         return vha;
2135 }