1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <scsi/scsi_tcq.h>
12 #include <scsi/scsi_bsg_fc.h>
13 #include <scsi/scsi_eh.h>
14
15 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
16 static void qla2x00_process_completed_request(struct scsi_qla_host *,
17         struct req_que *, uint32_t);
18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
21         sts_entry_t *);
22
23 /**
24  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
25  * @irq: interrupt number
26  * @dev_id: SCSI driver HA context (pointer to the response queue)
27  *
28  * Called by system whenever the host adapter generates an interrupt.
29  *
30  * Returns handled flag.
31  */
32 irqreturn_t
33 qla2100_intr_handler(int irq, void *dev_id)
34 {
35         scsi_qla_host_t *vha;
36         struct qla_hw_data *ha;
37         struct device_reg_2xxx __iomem *reg;
38         int             status;
39         unsigned long   iter;
40         uint16_t        hccr;
41         uint16_t        mb[4];
42         struct rsp_que *rsp;
43         unsigned long   flags;
44
45         rsp = (struct rsp_que *) dev_id;
46         if (!rsp) {
47                 printk(KERN_INFO
48                     "%s(): NULL response queue pointer\n", __func__);
49                 return (IRQ_NONE);
50         }
51
52         ha = rsp->hw;
53         reg = &ha->iobase->isp;
54         status = 0;
55
56         spin_lock_irqsave(&ha->hardware_lock, flags);
57         vha = pci_get_drvdata(ha->pdev);
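            /*
             * Drain at most 50 interrupt conditions per invocation; the loop
             * exits early once the RISC interrupt status bit is no longer set.
             */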
58         for (iter = 50; iter--; ) {
59                 hccr = RD_REG_WORD(&reg->hccr);
60                 if (hccr & HCCR_RISC_PAUSE) {
61                         if (pci_channel_offline(ha->pdev))
62                                 break;
63
64                         /*
65                          * Issue a "HARD" reset in order for the RISC interrupt
66                          * bit to be cleared.  Schedule a big hammer to get
67                          * out of the RISC PAUSED state.
68                          */
69                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
70                         RD_REG_WORD(&reg->hccr);
71
72                         ha->isp_ops->fw_dump(vha, 1);
73                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
74                         break;
75                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
76                         break;
77
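                    /*
                     * Semaphore BIT_0 set means the RISC has posted mailbox
                     * data (a command completion status or an asynchronous
                     * event); otherwise the interrupt signals new response
                     * queue entries.
                     */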
78                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
79                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
80                         RD_REG_WORD(&reg->hccr);
81
82                         /* Get mailbox data. */
83                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
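                            /*
                             * Mailbox 0 in the 0x4000-0x7fff range is a
                             * command completion status; 0x8000-0xbfff is an
                             * asynchronous event code.
                             */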
84                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
85                                 qla2x00_mbx_completion(vha, mb[0]);
86                                 status |= MBX_INTERRUPT;
87                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
88                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
89                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
90                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
91                                 qla2x00_async_event(vha, rsp, mb);
92                         } else {
93                                 /*EMPTY*/
94                                 DEBUG2(printk("scsi(%ld): Unrecognized "
95                                     "interrupt type (%d).\n",
96                                     vha->host_no, mb[0]));
97                         }
98                         /* Release mailbox registers. */
99                         WRT_REG_WORD(&reg->semaphore, 0);
100                         RD_REG_WORD(&reg->semaphore);
101                 } else {
102                         qla2x00_process_response_queue(rsp);
103
104                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
105                         RD_REG_WORD(&reg->hccr);
106                 }
107         }
108         spin_unlock_irqrestore(&ha->hardware_lock, flags);
109
110         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
111             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
112                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
113                 complete(&ha->mbx_intr_comp);
114         }
115
116         return (IRQ_HANDLED);
117 }
118
119 /**
120  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
121  * @irq: interrupt number
122  * @dev_id: SCSI driver HA context (pointer to the response queue)
123  *
124  * Called by system whenever the host adapter generates an interrupt.
125  *
126  * Returns handled flag.
127  */
128 irqreturn_t
129 qla2300_intr_handler(int irq, void *dev_id)
130 {
131         scsi_qla_host_t *vha;
132         struct device_reg_2xxx __iomem *reg;
133         int             status;
134         unsigned long   iter;
135         uint32_t        stat;
136         uint16_t        hccr;
137         uint16_t        mb[4];
138         struct rsp_que *rsp;
139         struct qla_hw_data *ha;
140         unsigned long   flags;
141
142         rsp = (struct rsp_que *) dev_id;
143         if (!rsp) {
144                 printk(KERN_INFO
145                     "%s(): NULL response queue pointer\n", __func__);
146                 return (IRQ_NONE);
147         }
148
149         ha = rsp->hw;
150         reg = &ha->iobase->isp;
151         status = 0;
152
153         spin_lock_irqsave(&ha->hardware_lock, flags);
154         vha = pci_get_drvdata(ha->pdev);
155         for (iter = 50; iter--; ) {
156                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
157                 if (stat & HSR_RISC_PAUSED) {
158                         if (unlikely(pci_channel_offline(ha->pdev)))
159                                 break;
160
161                         hccr = RD_REG_WORD(&reg->hccr);
162                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
163                                 qla_printk(KERN_INFO, ha, "Parity error -- "
164                                     "HCCR=%x, Dumping firmware!\n", hccr);
165                         else
166                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
167                                     "HCCR=%x, Dumping firmware!\n", hccr);
168
169                         /*
170                          * Issue a "HARD" reset in order for the RISC
171                          * interrupt bit to be cleared.  Schedule a big
172                          * hammer to get out of the RISC PAUSED state.
173                          */
174                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
175                         RD_REG_WORD(&reg->hccr);
176
177                         ha->isp_ops->fw_dump(vha, 1);
178                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
179                         break;
180                 } else if ((stat & HSR_RISC_INT) == 0)
181                         break;
182
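                    /*
                     * The low byte of host_status identifies the interrupt
                     * source; the upper word carries mailbox register 0 for
                     * completions and asynchronous events.
                     */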
183                 switch (stat & 0xff) {
184                 case 0x1:
185                 case 0x2:
186                 case 0x10:
187                 case 0x11:
188                         qla2x00_mbx_completion(vha, MSW(stat));
189                         status |= MBX_INTERRUPT;
190
191                         /* Release mailbox registers. */
192                         WRT_REG_WORD(&reg->semaphore, 0);
193                         break;
194                 case 0x12:
195                         mb[0] = MSW(stat);
196                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
197                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
199                         qla2x00_async_event(vha, rsp, mb);
200                         break;
201                 case 0x13:
202                         qla2x00_process_response_queue(rsp);
203                         break;
204                 case 0x15:
205                         mb[0] = MBA_CMPLT_1_16BIT;
206                         mb[1] = MSW(stat);
207                         qla2x00_async_event(vha, rsp, mb);
208                         break;
209                 case 0x16:
210                         mb[0] = MBA_SCSI_COMPLETION;
211                         mb[1] = MSW(stat);
212                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
213                         qla2x00_async_event(vha, rsp, mb);
214                         break;
215                 default:
216                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
217                             "(%d).\n",
218                             vha->host_no, stat & 0xff));
219                         break;
220                 }
221                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
222                 RD_REG_WORD_RELAXED(&reg->hccr);
223         }
224         spin_unlock_irqrestore(&ha->hardware_lock, flags);
225
226         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
227             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
228                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
229                 complete(&ha->mbx_intr_comp);
230         }
231
232         return (IRQ_HANDLED);
233 }
234
235 /**
236  * qla2x00_mbx_completion() - Process mailbox command completions.
237  * @ha: SCSI driver HA context
238  * @mb0: Mailbox0 register
239  */
240 static void
241 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
242 {
243         uint16_t        cnt;
244         uint16_t __iomem *wptr;
245         struct qla_hw_data *ha = vha->hw;
246         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
247
248         /* Load return mailbox registers. */
249         ha->flags.mbox_int = 1;
250         ha->mailbox_out[0] = mb0;
251         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
252
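            /*
             * The ISP2200 maps mailbox registers 8 and above at a different
             * offset, so the register pointer is re-seeded at cnt == 8.
             * Mailboxes 4 and 5 are read through the debounce helper, which
             * re-reads the register until two consecutive reads agree.
             */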
253         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
254                 if (IS_QLA2200(ha) && cnt == 8)
255                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
256                 if (cnt == 4 || cnt == 5)
257                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
258                 else
259                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
260
261                 wptr++;
262         }
263
264         if (ha->mcp) {
265                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
266                     __func__, vha->host_no, ha->mcp->mb[0]));
267         } else {
268                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
269                     __func__, vha->host_no));
270         }
271 }
272
273 static void
274 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
275 {
276         static char *event[] =
277                 { "Complete", "Request Notification", "Time Extension" };
278         int rval;
279         struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
280         uint16_t __iomem *wptr;
281         uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
282
283         /* Seed data -- mailbox1 -> mailbox7. */
284         wptr = (uint16_t __iomem *)&reg24->mailbox1;
285         for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
286                 mb[cnt] = RD_REG_WORD(wptr);
287
288         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
289             "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
290             event[aen & 0xff],
291             mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));
292
293         /* Acknowledgement needed? [Notify && non-zero timeout]. */
294         timeout = (descr >> 8) & 0xf;
295         if (aen != MBA_IDC_NOTIFY || !timeout)
296                 return;
297
298         DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
299             "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));
300
301         rval = qla2x00_post_idc_ack_work(vha, mb);
302         if (rval != QLA_SUCCESS)
303                 qla_printk(KERN_WARNING, vha->hw,
304                     "IDC failed to post ACK.\n");
305 }
306
307 /**
308  * qla2x00_async_event() - Process asynchronous events.
309  * @ha: SCSI driver HA context
310  * @mb: Mailbox registers (0 - 3)
311  */
312 void
313 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
314 {
315 #define LS_UNKNOWN      2
316         static char     *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
317         char            *link_speed;
318         uint16_t        handle_cnt;
319         uint16_t        cnt, mbx;
320         uint32_t        handles[5];
321         struct qla_hw_data *ha = vha->hw;
322         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
323         struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
324         uint32_t        rscn_entry, host_pid;
325         uint8_t         rscn_queue_index;
326         unsigned long   flags;
327
328         /* Setup to process RIO completion. */
329         handle_cnt = 0;
330         if (IS_QLA8XXX_TYPE(ha))
331                 goto skip_rio;
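            /*
             * Reduced Interrupt Operation (RIO) completions pack one or more
             * command handles directly in the mailbox registers; each variant
             * below collects the handles and normalizes mb[0] to
             * MBA_SCSI_COMPLETION for the fast-post path.
             */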
332         switch (mb[0]) {
333         case MBA_SCSI_COMPLETION:
334                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
335                 handle_cnt = 1;
336                 break;
337         case MBA_CMPLT_1_16BIT:
338                 handles[0] = mb[1];
339                 handle_cnt = 1;
340                 mb[0] = MBA_SCSI_COMPLETION;
341                 break;
342         case MBA_CMPLT_2_16BIT:
343                 handles[0] = mb[1];
344                 handles[1] = mb[2];
345                 handle_cnt = 2;
346                 mb[0] = MBA_SCSI_COMPLETION;
347                 break;
348         case MBA_CMPLT_3_16BIT:
349                 handles[0] = mb[1];
350                 handles[1] = mb[2];
351                 handles[2] = mb[3];
352                 handle_cnt = 3;
353                 mb[0] = MBA_SCSI_COMPLETION;
354                 break;
355         case MBA_CMPLT_4_16BIT:
356                 handles[0] = mb[1];
357                 handles[1] = mb[2];
358                 handles[2] = mb[3];
359                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
360                 handle_cnt = 4;
361                 mb[0] = MBA_SCSI_COMPLETION;
362                 break;
363         case MBA_CMPLT_5_16BIT:
364                 handles[0] = mb[1];
365                 handles[1] = mb[2];
366                 handles[2] = mb[3];
367                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
368                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
369                 handle_cnt = 5;
370                 mb[0] = MBA_SCSI_COMPLETION;
371                 break;
372         case MBA_CMPLT_2_32BIT:
373                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
374                 handles[1] = le32_to_cpu(
375                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
376                     RD_MAILBOX_REG(ha, reg, 6));
377                 handle_cnt = 2;
378                 mb[0] = MBA_SCSI_COMPLETION;
379                 break;
380         default:
381                 break;
382         }
383 skip_rio:
384         switch (mb[0]) {
385         case MBA_SCSI_COMPLETION:       /* Fast Post */
386                 if (!vha->flags.online)
387                         break;
388
389                 for (cnt = 0; cnt < handle_cnt; cnt++)
390                         qla2x00_process_completed_request(vha, rsp->req,
391                                 handles[cnt]);
392                 break;
393
394         case MBA_RESET:                 /* Reset */
395                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
396                         vha->host_no));
397
398                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
399                 break;
400
401         case MBA_SYSTEM_ERR:            /* System Error */
402                 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
403                 qla_printk(KERN_INFO, ha,
404                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
405                     "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
406
407                 ha->isp_ops->fw_dump(vha, 1);
408
409                 if (IS_FWI2_CAPABLE(ha)) {
410                         if (mb[1] == 0 && mb[2] == 0) {
411                                 qla_printk(KERN_ERR, ha,
412                                     "Unrecoverable Hardware Error: adapter "
413                                     "marked OFFLINE!\n");
414                                 vha->flags.online = 0;
415                         } else
416                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
417                 } else if (mb[1] == 0) {
418                         qla_printk(KERN_INFO, ha,
419                             "Unrecoverable Hardware Error: adapter marked "
420                             "OFFLINE!\n");
421                         vha->flags.online = 0;
422                 } else
423                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
424                 break;
425
426         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
427                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error (%x).\n",
428                     vha->host_no, mb[1]));
429                 qla_printk(KERN_WARNING, ha,
430                     "ISP Request Transfer Error (%x).\n", mb[1]);
431
432                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
433                 break;
434
435         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
436                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
437                     vha->host_no));
438                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
439
440                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
441                 break;
442
443         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
444                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
445                     vha->host_no));
446                 break;
447
448         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
449                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
450                     mb[1]));
451                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
452
453                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
454                         atomic_set(&vha->loop_state, LOOP_DOWN);
455                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
456                         qla2x00_mark_all_devices_lost(vha, 1);
457                 }
458
459                 if (vha->vp_idx) {
460                         atomic_set(&vha->vp_state, VP_FAILED);
461                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
462                 }
463
464                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
465                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
466
467                 vha->flags.management_server_logged_in = 0;
468                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
469                 break;
470
471         case MBA_LOOP_UP:               /* Loop Up Event */
472                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
473                         link_speed = link_speeds[0];
474                         ha->link_data_rate = PORT_SPEED_1GB;
475                 } else {
476                         link_speed = link_speeds[LS_UNKNOWN];
477                         if (mb[1] < 5)
478                                 link_speed = link_speeds[mb[1]];
479                         else if (mb[1] == 0x13)
480                                 link_speed = link_speeds[5];
481                         ha->link_data_rate = mb[1];
482                 }
483
484                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
485                     vha->host_no, link_speed));
486                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
487                     link_speed);
488
489                 vha->flags.management_server_logged_in = 0;
490                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
491                 break;
492
493         case MBA_LOOP_DOWN:             /* Loop Down Event */
494                 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
495                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
496                     "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3],
497                     mbx));
498                 qla_printk(KERN_INFO, ha,
499                     "LOOP DOWN detected (%x %x %x %x).\n", mb[1], mb[2], mb[3],
500                     mbx);
501
502                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
503                         atomic_set(&vha->loop_state, LOOP_DOWN);
504                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
505                         vha->device_flags |= DFLG_NO_CABLE;
506                         qla2x00_mark_all_devices_lost(vha, 1);
507                 }
508
509                 if (vha->vp_idx) {
510                         atomic_set(&vha->vp_state, VP_FAILED);
511                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
512                 }
513
514                 vha->flags.management_server_logged_in = 0;
515                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
516                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
517                 break;
518
519         case MBA_LIP_RESET:             /* LIP reset occurred */
520                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
521                     vha->host_no, mb[1]));
522                 qla_printk(KERN_INFO, ha,
523                     "LIP reset occurred (%x).\n", mb[1]);
524
525                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
526                         atomic_set(&vha->loop_state, LOOP_DOWN);
527                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
528                         qla2x00_mark_all_devices_lost(vha, 1);
529                 }
530
531                 if (vha->vp_idx) {
532                         atomic_set(&vha->vp_state, VP_FAILED);
533                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
534                 }
535
536                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
537
538                 ha->operating_mode = LOOP;
539                 vha->flags.management_server_logged_in = 0;
540                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
541                 break;
542
543         /* case MBA_DCBX_COMPLETE: */
544         case MBA_POINT_TO_POINT:        /* Point-to-Point */
545                 if (IS_QLA2100(ha))
546                         break;
547
548                 if (IS_QLA8XXX_TYPE(ha)) {
549                         DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
550                             "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
551                         if (ha->notify_dcbx_comp)
552                                 complete(&ha->dcbx_comp);
553
554                 } else
555                         DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
556                             "received.\n", vha->host_no));
557
558                 /*
559                  * Until there's a transition from loop down to loop up, treat
560                  * this as loop down only.
561                  */
562                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
563                         atomic_set(&vha->loop_state, LOOP_DOWN);
564                         if (!atomic_read(&vha->loop_down_timer))
565                                 atomic_set(&vha->loop_down_timer,
566                                     LOOP_DOWN_TIME);
567                         qla2x00_mark_all_devices_lost(vha, 1);
568                 }
569
570                 if (vha->vp_idx) {
571                         atomic_set(&vha->vp_state, VP_FAILED);
572                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
573                 }
574
575                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
576                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
577
578                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
579                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
580
581                 ha->flags.gpsc_supported = 1;
582                 vha->flags.management_server_logged_in = 0;
583                 break;
584
585         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
586                 if (IS_QLA2100(ha))
587                         break;
588
589                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
590                     "received.\n",
591                     vha->host_no));
592                 qla_printk(KERN_INFO, ha,
593                     "Configuration change detected: value=%x.\n", mb[1]);
594
595                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
596                         atomic_set(&vha->loop_state, LOOP_DOWN);
597                         if (!atomic_read(&vha->loop_down_timer))
598                                 atomic_set(&vha->loop_down_timer,
599                                     LOOP_DOWN_TIME);
600                         qla2x00_mark_all_devices_lost(vha, 1);
601                 }
602
603                 if (vha->vp_idx) {
604                         atomic_set(&vha->vp_state, VP_FAILED);
605                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
606                 }
607
608                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
609                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
610                 break;
611
612         case MBA_PORT_UPDATE:           /* Port database update */
613                 /*
614                  * Handle only global and vn-port update events
615                  *
616                  * Relevant inputs:
617                  * mb[1] = N_Port handle of changed port
618                  * OR 0xffff for global event
619                  * mb[2] = New login state
620                  * 7 = Port logged out
621                  * mb[3] = LSB is vp_idx, 0xff = all vps
622                  *
623                  * Skip processing if:
624                  *       Event is global, vp_idx is NOT all vps,
625                  *           vp_idx does not match
626                  *       Event is not global, vp_idx does not match
627                  */
628                 if (IS_QLA2XXX_MIDTYPE(ha) &&
629                     ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
630                         (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
631                         break;
632
633                 /* Global event -- port logout or port unavailable. */
634                 if (mb[1] == 0xffff && mb[2] == 0x7) {
635                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
636                             vha->host_no));
637                         DEBUG(printk(KERN_INFO
638                             "scsi(%ld): Port unavailable %04x %04x %04x.\n",
639                             vha->host_no, mb[1], mb[2], mb[3]));
640
641                         if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
642                                 atomic_set(&vha->loop_state, LOOP_DOWN);
643                                 atomic_set(&vha->loop_down_timer,
644                                     LOOP_DOWN_TIME);
645                                 vha->device_flags |= DFLG_NO_CABLE;
646                                 qla2x00_mark_all_devices_lost(vha, 1);
647                         }
648
649                         if (vha->vp_idx) {
650                                 atomic_set(&vha->vp_state, VP_FAILED);
651                                 fc_vport_set_state(vha->fc_vport,
652                                     FC_VPORT_FAILED);
653                                 qla2x00_mark_all_devices_lost(vha, 1);
654                         }
655
656                         vha->flags.management_server_logged_in = 0;
657                         ha->link_data_rate = PORT_SPEED_UNKNOWN;
658                         break;
659                 }
660
661                 /*
662                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
663                  * event etc. earlier indicating loop is down) then process
664                  * it.  Otherwise ignore it and wait for RSCN to come in.
665                  */
666                 atomic_set(&vha->loop_down_timer, 0);
667                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
668                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
669                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
670                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
671                             mb[2], mb[3]));
672                         break;
673                 }
674
675                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
676                     vha->host_no));
677                 DEBUG(printk(KERN_INFO
678                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
679                     vha->host_no, mb[1], mb[2], mb[3]));
680
681                 /*
682                  * Mark all devices as missing so we will login again.
683                  */
684                 atomic_set(&vha->loop_state, LOOP_UP);
685
686                 qla2x00_mark_all_devices_lost(vha, 1);
687
688                 vha->flags.rscn_queue_overflow = 1;
689
690                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
691                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
692                 break;
693
694         case MBA_RSCN_UPDATE:           /* State Change Registration */
695                 /* Check if the Vport has issued a SCR */
696                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
697                         break;
698                 /* Only handle SCNs for our Vport index. */
699                 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
700                         break;
701
702                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
703                     vha->host_no));
704                 DEBUG(printk(KERN_INFO
705                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
706                     vha->host_no, mb[1], mb[2], mb[3]));
707
708                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
709                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
710                                 | vha->d_id.b.al_pa;
711                 if (rscn_entry == host_pid) {
712                         DEBUG(printk(KERN_INFO
713                             "scsi(%ld): Ignoring RSCN update to local host "
714                             "port ID (%06x)\n",
715                             vha->host_no, host_pid));
716                         break;
717                 }
718
719                 /* Ignore reserved bits from RSCN-payload. */
720                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
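                    /*
                     * Queue the entry in the circular RSCN buffer; if
                     * advancing the in-pointer would collide with the
                     * out-pointer the queue is full, so only the overflow
                     * flag is set.
                     */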
721                 rscn_queue_index = vha->rscn_in_ptr + 1;
722                 if (rscn_queue_index == MAX_RSCN_COUNT)
723                         rscn_queue_index = 0;
724                 if (rscn_queue_index != vha->rscn_out_ptr) {
725                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
726                         vha->rscn_in_ptr = rscn_queue_index;
727                 } else {
728                         vha->flags.rscn_queue_overflow = 1;
729                 }
730
731                 atomic_set(&vha->loop_state, LOOP_UPDATE);
732                 atomic_set(&vha->loop_down_timer, 0);
733                 vha->flags.management_server_logged_in = 0;
734
735                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
736                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
737                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
738                 break;
739
740         /* case MBA_RIO_RESPONSE: */
741         case MBA_ZIO_RESPONSE:
742                 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
743                     vha->host_no));
744
745                 if (IS_FWI2_CAPABLE(ha))
746                         qla24xx_process_response_queue(vha, rsp);
747                 else
748                         qla2x00_process_response_queue(rsp);
749                 break;
750
751         case MBA_DISCARD_RND_FRAME:
752                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
753                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
754                 break;
755
756         case MBA_TRACE_NOTIFICATION:
757                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
758                 vha->host_no, mb[1], mb[2]));
759                 break;
760
761         case MBA_ISP84XX_ALERT:
762                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
763                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
764
765                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
766                 switch (mb[1]) {
767                 case A84_PANIC_RECOVERY:
768                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
769                             "%04x %04x\n", mb[2], mb[3]);
770                         break;
771                 case A84_OP_LOGIN_COMPLETE:
772                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
773                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
774                             "firmware version %x\n", ha->cs84xx->op_fw_version));
775                         break;
776                 case A84_DIAG_LOGIN_COMPLETE:
777                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
778                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
779                             "diagnostic firmware version %x\n",
780                             ha->cs84xx->diag_fw_version));
781                         break;
782                 case A84_GOLD_LOGIN_COMPLETE:
783                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
784                         ha->cs84xx->fw_update = 1;
785                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
786                             "firmware version %x\n",
787                             ha->cs84xx->gold_fw_version));
788                         break;
789                 default:
790                         qla_printk(KERN_ERR, ha,
791                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
792                             mb[1], mb[2], mb[3]);
793                 }
794                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
795                 break;
796         case MBA_DCBX_START:
797                 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
798                     vha->host_no, mb[1], mb[2], mb[3]));
799                 break;
800         case MBA_DCBX_PARAM_UPDATE:
801                 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
802                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
803                 break;
804         case MBA_FCF_CONF_ERR:
805                 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
806                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
807                 break;
808         case MBA_IDC_COMPLETE:
809         case MBA_IDC_NOTIFY:
810         case MBA_IDC_TIME_EXT:
811                 qla81xx_idc_event(vha, mb[0], mb[1]);
812                 break;
813         }
814
815         if (!vha->vp_idx && ha->num_vhosts)
816                 qla2x00_alert_all_vps(rsp, mb);
817 }
818
819 /**
820  * qla2x00_process_completed_request() - Process a Fast Post response.
821  * @ha: SCSI driver HA context
822  * @index: SRB index
823  */
824 static void
825 qla2x00_process_completed_request(struct scsi_qla_host *vha,
826                                 struct req_que *req, uint32_t index)
827 {
828         srb_t *sp;
829         struct qla_hw_data *ha = vha->hw;
830
831         /* Validate handle. */
832         if (index >= MAX_OUTSTANDING_COMMANDS) {
833                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
834                     vha->host_no, index));
835                 qla_printk(KERN_WARNING, ha,
836                     "Invalid SCSI completion handle %d.\n", index);
837
838                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
839                 return;
840         }
841
842         sp = req->outstanding_cmds[index];
843         if (sp) {
844                 /* Free outstanding command slot. */
845                 req->outstanding_cmds[index] = NULL;
846
847                 /* Save ISP completion status */
848                 sp->cmd->result = DID_OK << 16;
849                 qla2x00_sp_compl(ha, sp);
850         } else {
851                 DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
852                         " handle(0x%x)\n", vha->host_no, req->id, index));
853                 qla_printk(KERN_WARNING, ha,
854                     "Invalid ISP SCSI completion handle\n");
855
856                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
857         }
858 }
859
860 static srb_t *
861 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
862     struct req_que *req, void *iocb)
863 {
864         struct qla_hw_data *ha = vha->hw;
865         sts_entry_t *pkt = iocb;
866         srb_t *sp = NULL;
867         uint16_t index;
868
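            /*
             * The low word of the IOCB handle indexes the request queue's
             * outstanding command array; an out-of-range handle forces an ISP
             * abort, while an empty slot is treated as a command that has
             * already timed out.
             */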
869         index = LSW(pkt->handle);
870         if (index >= MAX_OUTSTANDING_COMMANDS) {
871                 qla_printk(KERN_WARNING, ha,
872                     "%s: Invalid completion handle (%x).\n", func, index);
873                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
874                 goto done;
875         }
876         sp = req->outstanding_cmds[index];
877         if (!sp) {
878                 qla_printk(KERN_WARNING, ha,
879                     "%s: Invalid completion handle (%x) -- timed-out.\n", func,
880                     index);
881                 return sp;
882         }
883         if (sp->handle != index) {
884                 qla_printk(KERN_WARNING, ha,
885                     "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
886                     index);
887                 return NULL;
888         }
889
890         req->outstanding_cmds[index] = NULL;
891
892 done:
893         return sp;
894 }
895
896 static void
897 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
898     struct mbx_entry *mbx)
899 {
900         const char func[] = "MBX-IOCB";
901         const char *type;
902         fc_port_t *fcport;
903         srb_t *sp;
904         struct srb_iocb *lio;
905         struct srb_ctx *ctx;
906         uint16_t *data;
907         uint16_t status;
908
909         sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
910         if (!sp)
911                 return;
912
913         ctx = sp->ctx;
914         lio = ctx->u.iocb_cmd;
915         type = ctx->name;
916         fcport = sp->fcport;
917         data = lio->u.logio.data;
918
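            /*
             * Default the login I/O completion data to an error; it is
             * overwritten below once the mailbox status has been decoded.
             */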
919         data[0] = MBS_COMMAND_ERROR;
920         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
921             QLA_LOGIO_LOGIN_RETRIED : 0;
922         if (mbx->entry_status) {
923                 DEBUG2(printk(KERN_WARNING
924                     "scsi(%ld:%x): Async-%s error entry - portid=%02x%02x%02x "
925                     "entry-status=%x status=%x state-flag=%x "
926                     "status-flags=%x.\n",
927                     fcport->vha->host_no, sp->handle, type,
928                     fcport->d_id.b.domain, fcport->d_id.b.area,
929                     fcport->d_id.b.al_pa, mbx->entry_status,
930                     le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
931                     le16_to_cpu(mbx->status_flags)));
932
933                 DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
934
935                 goto logio_done;
936         }
937
938         status = le16_to_cpu(mbx->status);
939         if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
940             le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
941                 status = 0;
942         if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
943                 DEBUG2(printk(KERN_DEBUG
944                     "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
945                     "mbx1=%x.\n",
946                     fcport->vha->host_no, sp->handle, type,
947                     fcport->d_id.b.domain, fcport->d_id.b.area,
948                     fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1)));
949
950                 data[0] = MBS_COMMAND_COMPLETE;
951                 if (ctx->type == SRB_LOGIN_CMD) {
952                         fcport->port_type = FCT_TARGET;
953                         if (le16_to_cpu(mbx->mb1) & BIT_0)
954                                 fcport->port_type = FCT_INITIATOR;
955                         else if (le16_to_cpu(mbx->mb1) & BIT_1)
956                                 fcport->flags |= FCF_FCP2_DEVICE;
957                 }
958                 goto logio_done;
959         }
960
961         data[0] = le16_to_cpu(mbx->mb0);
962         switch (data[0]) {
963         case MBS_PORT_ID_USED:
964                 data[1] = le16_to_cpu(mbx->mb1);
965                 break;
966         case MBS_LOOP_ID_USED:
967                 break;
968         default:
969                 data[0] = MBS_COMMAND_ERROR;
970                 break;
971         }
972
973         DEBUG2(printk(KERN_WARNING
974             "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x status=%x "
975             "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
976             fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
977             fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
978             le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
979             le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
980             le16_to_cpu(mbx->mb7)));
981
982 logio_done:
983         lio->done(sp);
984 }
985
986 static void
987 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
988     sts_entry_t *pkt, int iocb_type)
989 {
990         const char func[] = "CT_IOCB";
991         const char *type;
992         struct qla_hw_data *ha = vha->hw;
993         srb_t *sp;
994         struct srb_ctx *sp_bsg;
995         struct fc_bsg_job *bsg_job;
996         uint16_t comp_status;
997
998         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
999         if (!sp)
1000                 return;
1001
1002         sp_bsg = sp->ctx;
1003         bsg_job = sp_bsg->u.bsg_job;
1004
1005         type = NULL;
1006         switch (sp_bsg->type) {
1007         case SRB_CT_CMD:
1008                 type = "ct pass-through";
1009                 break;
1010         default:
1011                 qla_printk(KERN_WARNING, ha,
1012                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1013                     sp_bsg->type);
1014                 return;
1015         }
1016
1017         comp_status = le16_to_cpu(pkt->comp_status);
1018
1019         /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1020          * fc payload  to the caller
1021          */
1022         bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1023         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1024
1025         if (comp_status != CS_COMPLETE) {
1026                 if (comp_status == CS_DATA_UNDERRUN) {
1027                         bsg_job->reply->result = DID_OK << 16;
1028                         bsg_job->reply->reply_payload_rcv_len =
1029                             le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1030
1031                         DEBUG2(qla_printk(KERN_WARNING, ha,
1032                             "scsi(%ld): CT pass-through-%s error "
1033                             "comp_status-status=0x%x total_byte = 0x%x.\n",
1034                             vha->host_no, type, comp_status,
1035                             bsg_job->reply->reply_payload_rcv_len));
1036                 } else {
1037                         DEBUG2(qla_printk(KERN_WARNING, ha,
1038                             "scsi(%ld): CT pass-through-%s error "
1039                             "comp_status-status=0x%x.\n",
1040                             vha->host_no, type, comp_status));
1041                         bsg_job->reply->result = DID_ERROR << 16;
1042                         bsg_job->reply->reply_payload_rcv_len = 0;
1043                 }
1044                 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1045         } else {
1046                 bsg_job->reply->result = DID_OK << 16;
1047                 bsg_job->reply->reply_payload_rcv_len =
1048                     bsg_job->reply_payload.payload_len;
1049                 bsg_job->reply_len = 0;
1050         }
1051
1052         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1053             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1054
1055         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1056             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1057
1058         if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
1059                 kfree(sp->fcport);
1060
1061         kfree(sp->ctx);
1062         mempool_free(sp, ha->srb_mempool);
1063         bsg_job->job_done(bsg_job);
1064 }
1065
1066 static void
1067 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1068     struct sts_entry_24xx *pkt, int iocb_type)
1069 {
1070         const char func[] = "ELS_CT_IOCB";
1071         const char *type;
1072         struct qla_hw_data *ha = vha->hw;
1073         srb_t *sp;
1074         struct srb_ctx *sp_bsg;
1075         struct fc_bsg_job *bsg_job;
1076         uint16_t comp_status;
1077         uint32_t fw_status[3];
1078         uint8_t* fw_sts_ptr;
1079
1080         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1081         if (!sp)
1082                 return;
1083         sp_bsg = sp->ctx;
1084         bsg_job = sp_bsg->u.bsg_job;
1085
1086         type = NULL;
1087         switch (sp_bsg->type) {
1088         case SRB_ELS_CMD_RPT:
1089         case SRB_ELS_CMD_HST:
1090                 type = "els";
1091                 break;
1092         case SRB_CT_CMD:
1093                 type = "ct pass-through";
1094                 break;
1095         default:
1096                 qla_printk(KERN_WARNING, ha,
1097                     "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
1098                     sp_bsg->type);
1099                 return;
1100         }
1101
1102         comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1103         fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1104         fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1105
1106         /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1107          * fc payload  to the caller
1108          */
1109         bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1110         bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
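            /*
             * The reply length reserves room for the three firmware status
             * words (completion status and the two error subcodes), which are
             * copied after the fc_bsg_reply in the job's sense buffer on
             * error.
             */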
1111
1112         if (comp_status != CS_COMPLETE) {
1113                 if (comp_status == CS_DATA_UNDERRUN) {
1114                         bsg_job->reply->result = DID_OK << 16;
1115                         bsg_job->reply->reply_payload_rcv_len =
1116                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
1117
1118                         DEBUG2(qla_printk(KERN_WARNING, ha,
1119                             "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1120                             "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1121                                 vha->host_no, sp->handle, type, comp_status, fw_status[1], fw_status[2],
1122                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count)));
1123                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1124                         memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1125                 }
1126                 else {
1127                         DEBUG2(qla_printk(KERN_WARNING, ha,
1128                             "scsi(%ld:0x%x): ELS-CT pass-through-%s error comp_status-status=0x%x "
1129                             "error subcode 1=0x%x error subcode 2=0x%x.\n",
1130                                 vha->host_no, sp->handle, type, comp_status,
1131                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1),
1132                                 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2)));
1133                         bsg_job->reply->result = DID_ERROR << 16;
1134                         bsg_job->reply->reply_payload_rcv_len = 0;
1135                         fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1136                         memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1137                 }
1138                 DEBUG2(qla2x00_dump_buffer((uint8_t *)pkt, sizeof(*pkt)));
1139         }
1140         else {
1141                 bsg_job->reply->result = DID_OK << 16;
1142                 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1143                 bsg_job->reply_len = 0;
1144         }
1145
1146         dma_unmap_sg(&ha->pdev->dev,
1147             bsg_job->request_payload.sg_list,
1148             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1149         dma_unmap_sg(&ha->pdev->dev,
1150             bsg_job->reply_payload.sg_list,
1151             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1152         if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1153             (sp_bsg->type == SRB_CT_CMD))
1154                 kfree(sp->fcport);
1155         kfree(sp->ctx);
1156         mempool_free(sp, ha->srb_mempool);
1157         bsg_job->job_done(bsg_job);
1158 }
1159
1160 static void
1161 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1162     struct logio_entry_24xx *logio)
1163 {
1164         const char func[] = "LOGIO-IOCB";
1165         const char *type;
1166         fc_port_t *fcport;
1167         srb_t *sp;
1168         struct srb_iocb *lio;
1169         struct srb_ctx *ctx;
1170         uint16_t *data;
1171         uint32_t iop[2];
1172
1173         sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1174         if (!sp)
1175                 return;
1176
1177         ctx = sp->ctx;
1178         lio = ctx->u.iocb_cmd;
1179         type = ctx->name;
1180         fcport = sp->fcport;
1181         data = lio->u.logio.data;
1182
1183         data[0] = MBS_COMMAND_ERROR;
1184         data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1185                 QLA_LOGIO_LOGIN_RETRIED : 0;
1186         if (logio->entry_status) {
1187                 DEBUG2(printk(KERN_WARNING
1188                     "scsi(%ld:%x): Async-%s error entry - "
1189                     "portid=%02x%02x%02x entry-status=%x.\n",
1190                     fcport->vha->host_no, sp->handle, type,
1191                     fcport->d_id.b.domain, fcport->d_id.b.area,
1192                     fcport->d_id.b.al_pa, logio->entry_status));
1193                 DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
1194
1195                 goto logio_done;
1196         }
1197
1198         if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1199                 DEBUG2(printk(KERN_DEBUG
1200                     "scsi(%ld:%x): Async-%s complete - portid=%02x%02x%02x "
1201                     "iop0=%x.\n",
1202                     fcport->vha->host_no, sp->handle, type,
1203                     fcport->d_id.b.domain, fcport->d_id.b.area,
1204                     fcport->d_id.b.al_pa,
1205                     le32_to_cpu(logio->io_parameter[0])));
1206
1207                 data[0] = MBS_COMMAND_COMPLETE;
1208                 if (ctx->type != SRB_LOGIN_CMD)
1209                         goto logio_done;
1210
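                    /*
                     * Decode the login I/O parameters: BIT_4 marks the remote
                     * port as a target (BIT_8 additionally flags an FCP-2
                     * device), BIT_5 marks it as an initiator.
                     */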
1211                 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1212                 if (iop[0] & BIT_4) {
1213                         fcport->port_type = FCT_TARGET;
1214                         if (iop[0] & BIT_8)
1215                                 fcport->flags |= FCF_FCP2_DEVICE;
1216                 } else if (iop[0] & BIT_5)
1217                         fcport->port_type = FCT_INITIATOR;
1218
1219                 if (logio->io_parameter[7] || logio->io_parameter[8])
1220                         fcport->supported_classes |= FC_COS_CLASS2;
1221                 if (logio->io_parameter[9] || logio->io_parameter[10])
1222                         fcport->supported_classes |= FC_COS_CLASS3;
1223
1224                 goto logio_done;
1225         }
1226
1227         iop[0] = le32_to_cpu(logio->io_parameter[0]);
1228         iop[1] = le32_to_cpu(logio->io_parameter[1]);
1229         switch (iop[0]) {
1230         case LSC_SCODE_PORTID_USED:
1231                 data[0] = MBS_PORT_ID_USED;
1232                 data[1] = LSW(iop[1]);
1233                 break;
1234         case LSC_SCODE_NPORT_USED:
1235                 data[0] = MBS_LOOP_ID_USED;
1236                 break;
1237         case LSC_SCODE_CMD_FAILED:
1238                 if ((iop[1] & 0xff) == 0x05) {
1239                         data[0] = MBS_NOT_LOGGED_IN;
1240                         break;
1241                 }
1242                 /* Fall through. */
1243         default:
1244                 data[0] = MBS_COMMAND_ERROR;
1245                 break;
1246         }
1247
1248         DEBUG2(printk(KERN_WARNING
1249             "scsi(%ld:%x): Async-%s failed - portid=%02x%02x%02x comp=%x "
1250             "iop0=%x iop1=%x.\n",
1251             fcport->vha->host_no, sp->handle, type, fcport->d_id.b.domain,
1252             fcport->d_id.b.area, fcport->d_id.b.al_pa,
1253             le16_to_cpu(logio->comp_status),
1254             le32_to_cpu(logio->io_parameter[0]),
1255             le32_to_cpu(logio->io_parameter[1])));
1256
1257 logio_done:
1258         lio->done(sp);
1259 }
1260
1261 static void
1262 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1263     struct tsk_mgmt_entry *tsk)
1264 {
1265         const char func[] = "TMF-IOCB";
1266         const char *type;
1267         fc_port_t *fcport;
1268         srb_t *sp;
1269         struct srb_iocb *iocb;
1270         struct srb_ctx *ctx;
1271         struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1272         int error = 1;
1273
1274         sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1275         if (!sp)
1276                 return;
1277
1278         ctx = sp->ctx;
1279         iocb = ctx->u.iocb_cmd;
1280         type = ctx->name;
1281         fcport = sp->fcport;
1282
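            /*
             * Validate the task-management status IOCB step by step: entry
             * status, completion status, response-info-valid flag, response
             * length, and finally the response code byte; any failure is
             * reported back through iocb->u.tmf.data.
             */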
1283         if (sts->entry_status) {
1284                 DEBUG2(printk(KERN_WARNING
1285                     "scsi(%ld:%x): Async-%s error - entry-status(%x).\n",
1286                     fcport->vha->host_no, sp->handle, type,
1287                     sts->entry_status));
1288         } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1289                 DEBUG2(printk(KERN_WARNING
1290                     "scsi(%ld:%x): Async-%s error - completion status(%x).\n",
1291                     fcport->vha->host_no, sp->handle, type,
1292                     sts->comp_status));
1293         } else if (!(le16_to_cpu(sts->scsi_status) &
1294             SS_RESPONSE_INFO_LEN_VALID)) {
1295                 DEBUG2(printk(KERN_WARNING
1296                     "scsi(%ld:%x): Async-%s error - no response info(%x).\n",
1297                     fcport->vha->host_no, sp->handle, type,
1298                     sts->scsi_status));
1299         } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1300                 DEBUG2(printk(KERN_WARNING
1301                     "scsi(%ld:%x): Async-%s error - not enough response(%d).\n",
1302                     fcport->vha->host_no, sp->handle, type,
1303                     sts->rsp_data_len));
1304         } else if (sts->data[3]) {
1305                 DEBUG2(printk(KERN_WARNING
1306                     "scsi(%ld:%x): Async-%s error - response(%x).\n",
1307                     fcport->vha->host_no, sp->handle, type,
1308                     sts->data[3]));
1309         } else {
1310                 error = 0;
1311         }
1312
1313         if (error) {
1314                 iocb->u.tmf.data = error;
1315                 DEBUG2(qla2x00_dump_buffer((uint8_t *)sts, sizeof(*sts)));
1316         }
1317
1318         iocb->done(sp);
1319 }
1320
1321 /**
1322  * qla2x00_process_response_queue() - Process response queue entries.
1323  * @rsp: response queue
1324  */
1325 void
1326 qla2x00_process_response_queue(struct rsp_que *rsp)
1327 {
1328         struct scsi_qla_host *vha;
1329         struct qla_hw_data *ha = rsp->hw;
1330         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1331         sts_entry_t     *pkt;
1332         uint16_t        handle_cnt;
1333         uint16_t        cnt;
1334
1335         vha = pci_get_drvdata(ha->pdev);
1336
1337         if (!vha->flags.online)
1338                 return;
1339
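        /*
         * Walk the response ring until an entry already stamped
         * RESPONSE_PROCESSED is reached; every entry consumed below is
         * re-stamped so it is not handled again on the next pass.
         */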
1340         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1341                 pkt = (sts_entry_t *)rsp->ring_ptr;
1342
1343                 rsp->ring_index++;
1344                 if (rsp->ring_index == rsp->length) {
1345                         rsp->ring_index = 0;
1346                         rsp->ring_ptr = rsp->ring;
1347                 } else {
1348                         rsp->ring_ptr++;
1349                 }
1350
1351                 if (pkt->entry_status != 0) {
1352                         DEBUG3(printk(KERN_INFO
1353                             "scsi(%ld): Process error entry.\n", vha->host_no));
1354
1355                         qla2x00_error_entry(vha, rsp, pkt);
1356                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1357                         wmb();
1358                         continue;
1359                 }
1360
1361                 switch (pkt->entry_type) {
1362                 case STATUS_TYPE:
1363                         qla2x00_status_entry(vha, rsp, pkt);
1364                         break;
1365                 case STATUS_TYPE_21:
1366                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1367                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1368                                 qla2x00_process_completed_request(vha, rsp->req,
1369                                     ((sts21_entry_t *)pkt)->handle[cnt]);
1370                         }
1371                         break;
1372                 case STATUS_TYPE_22:
1373                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1374                         for (cnt = 0; cnt < handle_cnt; cnt++) {
1375                                 qla2x00_process_completed_request(vha, rsp->req,
1376                                     ((sts22_entry_t *)pkt)->handle[cnt]);
1377                         }
1378                         break;
1379                 case STATUS_CONT_TYPE:
1380                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1381                         break;
1382                 case MBX_IOCB_TYPE:
1383                         qla2x00_mbx_iocb_entry(vha, rsp->req,
1384                             (struct mbx_entry *)pkt);
1385                         break;
1386                 case CT_IOCB_TYPE:
1387                         qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1388                         break;
1389                 default:
1390                         /* Type Not Supported. */
1391                         DEBUG4(printk(KERN_WARNING
1392                             "scsi(%ld): Received unknown response pkt type %x "
1393                             "entry status=%x.\n",
1394                             vha->host_no, pkt->entry_type, pkt->entry_status));
1395                         break;
1396                 }
1397                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1398                 wmb();
1399         }
1400
1401         /* Adjust ring index */
1402         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1403 }
1404
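/*
 * Copy as much sense data as this status IOCB carries into the midlayer's
 * sense buffer (capped at SCSI_SENSE_BUFFERSIZE).  If more sense bytes
 * remain than fit in the entry, the srb is parked in rsp->status_srb so
 * that subsequent status-continuation entries can deliver the rest.
 */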
1405 static inline void
1407 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1408     uint32_t sense_len, struct rsp_que *rsp)
1409 {
1410         struct scsi_cmnd *cp = sp->cmd;
1411
1412         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1413                 sense_len = SCSI_SENSE_BUFFERSIZE;
1414
1415         sp->request_sense_length = sense_len;
1416         sp->request_sense_ptr = cp->sense_buffer;
1417         if (sp->request_sense_length > par_sense_len)
1418                 sense_len = par_sense_len;
1419
1420         memcpy(cp->sense_buffer, sense_data, sense_len);
1421
1422         sp->request_sense_ptr += sense_len;
1423         sp->request_sense_length -= sense_len;
1424         if (sp->request_sense_length != 0)
1425                 rsp->status_srb = sp;
1426
1427         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
1428             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
1429             cp->device->channel, cp->device->id, cp->device->lun, cp,
1430             cp->serial_number));
1431         if (sense_len)
1432                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
1433 }
1434
1435 struct scsi_dif_tuple {
1436         __be16 guard;       /* Checksum */
1437         __be16 app_tag;         /* APPL identifier */
1438         __be32 ref_tag;         /* Target LBA or indirect LBA */
1439 };
1440
1441 /*
1442  * Checks the guard or meta-data for the type of error
1443  * detected by the HBA. In case of an error, an ILLEGAL_REQUEST
1444  * sense key with the matching ASC/ASCQ is placed in the sense
1445  * buffer to indicate to the kernel that the HBA detected the error.
1446  */
1447 static inline void
1448 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1449 {
1450         struct scsi_cmnd *cmd = sp->cmd;
1451         struct scsi_dif_tuple   *ep =
1452                         (struct scsi_dif_tuple *)&sts24->data[20];
1453         struct scsi_dif_tuple   *ap =
1454                         (struct scsi_dif_tuple *)&sts24->data[12];
1455         uint32_t        e_ref_tag, a_ref_tag;
1456         uint16_t        e_app_tag, a_app_tag;
1457         uint16_t        e_guard, a_guard;
1458
1459         e_ref_tag = be32_to_cpu(ep->ref_tag);
1460         a_ref_tag = be32_to_cpu(ap->ref_tag);
1461         e_app_tag = be16_to_cpu(ep->app_tag);
1462         a_app_tag = be16_to_cpu(ap->app_tag);
1463         e_guard = be16_to_cpu(ep->guard);
1464         a_guard = be16_to_cpu(ap->guard);
1465
1466         DEBUG18(printk(KERN_DEBUG
1467             "%s(): iocb(s) %p Returned STATUS\n", __func__, sts24));
1468
1469         DEBUG18(printk(KERN_ERR "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1470             " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1471             " tag=0x%x, act guard=0x%x, exp guard=0x%x\n",
1472             cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1473             a_app_tag, e_app_tag, a_guard, e_guard));
1474
1475
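        /*
         * Validate the guard, then the application tag, then the reference
         * tag; the first mismatch wins.  Each failure builds an
         * ILLEGAL_REQUEST sense (ASC 0x10) with a distinct ASCQ:
         * 0x1 guard, 0x2 app tag, 0x3 ref tag.
         */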
1476         /* check guard */
1477         if (e_guard != a_guard) {
1478                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1479                     0x10, 0x1);
1480                 set_driver_byte(cmd, DRIVER_SENSE);
1481                 set_host_byte(cmd, DID_ABORT);
1482                 cmd->result |= SAM_STAT_CHECK_CONDITION;
1483                 return;
1484         }
1485
1486         /* check appl tag */
1487         if (e_app_tag != a_app_tag) {
1488                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1489                     0x10, 0x2);
1490                 set_driver_byte(cmd, DRIVER_SENSE);
1491                 set_host_byte(cmd, DID_ABORT);
1492                 cmd->result |= SAM_STAT_CHECK_CONDITION;
1493                 return;
1494         }
1495
1496         /* check ref tag */
1497         if (e_ref_tag != a_ref_tag) {
1498                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1499                     0x10, 0x3);
1500                 set_driver_byte(cmd, DRIVER_SENSE);
1501                 set_host_byte(cmd, DID_ABORT);
1502                 cmd->result |= SAM_STAT_CHECK_CONDITION;
1503                 return;
1504         }
1505 }
1506
1507 /**
1508  * qla2x00_status_entry() - Process a Status IOCB entry.
1509  * @vha: SCSI driver HA context
 * @rsp: response queue
1510  * @pkt: Entry pointer
1511  */
1512 static void
1513 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1514 {
1515         srb_t           *sp;
1516         fc_port_t       *fcport;
1517         struct scsi_cmnd *cp;
1518         sts_entry_t *sts;
1519         struct sts_entry_24xx *sts24;
1520         uint16_t        comp_status;
1521         uint16_t        scsi_status;
1522         uint16_t        ox_id;
1523         uint8_t         lscsi_status;
1524         int32_t         resid;
1525         uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1526             fw_resid_len;
1527         uint8_t         *rsp_info, *sense_data;
1528         struct qla_hw_data *ha = vha->hw;
1529         uint32_t handle;
1530         uint16_t que;
1531         struct req_que *req;
1532         int logit = 1;
1533
1534         sts = (sts_entry_t *) pkt;
1535         sts24 = (struct sts_entry_24xx *) pkt;
1536         if (IS_FWI2_CAPABLE(ha)) {
1537                 comp_status = le16_to_cpu(sts24->comp_status);
1538                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1539         } else {
1540                 comp_status = le16_to_cpu(sts->comp_status);
1541                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1542         }
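        /*
         * The 32-bit completion handle packs the request-queue number in
         * the upper word and the outstanding-command index in the lower
         * word; a hypothetical sketch of the submission-side encoding:
         *
         *      handle = (queue_number << 16) | command_index;
         *
         * LSW()/MSW() below recover the index and the queue respectively.
         */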
1543         handle = (uint32_t) LSW(sts->handle);
1544         que = MSW(sts->handle);
1545         req = ha->req_q_map[que];
1546
1547         /* Fast path completion. */
1548         if (comp_status == CS_COMPLETE && scsi_status == 0) {
1549                 qla2x00_process_completed_request(vha, req, handle);
1550
1551                 return;
1552         }
1553
1554         /* Validate handle. */
1555         if (handle < MAX_OUTSTANDING_COMMANDS) {
1556                 sp = req->outstanding_cmds[handle];
1557                 req->outstanding_cmds[handle] = NULL;
1558         } else
1559                 sp = NULL;
1560
1561         if (sp == NULL) {
1562                 qla_printk(KERN_WARNING, ha,
1563                     "scsi(%ld): Invalid status handle (0x%x).\n", vha->host_no,
1564                     sts->handle);
1565
1566                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1567                 qla2xxx_wake_dpc(vha);
1568                 return;
1569         }
1570         cp = sp->cmd;
1571         if (cp == NULL) {
1572                 qla_printk(KERN_WARNING, ha,
1573                     "scsi(%ld): Command already returned (0x%x/%p).\n",
1574                     vha->host_no, sts->handle, sp);
1575
1576                 return;
1577         }
1578
1579         lscsi_status = scsi_status & STATUS_MASK;
1580
1581         fcport = sp->fcport;
1582
1583         ox_id = 0;
1584         sense_len = par_sense_len = rsp_info_len = resid_len =
1585             fw_resid_len = 0;
1586         if (IS_FWI2_CAPABLE(ha)) {
1587                 if (scsi_status & SS_SENSE_LEN_VALID)
1588                         sense_len = le32_to_cpu(sts24->sense_len);
1589                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1590                         rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1591                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1592                         resid_len = le32_to_cpu(sts24->rsp_residual_count);
1593                 if (comp_status == CS_DATA_UNDERRUN)
1594                         fw_resid_len = le32_to_cpu(sts24->residual_len);
1595                 rsp_info = sts24->data;
1596                 sense_data = sts24->data;
1597                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1598                 ox_id = le16_to_cpu(sts24->ox_id);
1599                 par_sense_len = sizeof(sts24->data);
1600         } else {
1601                 if (scsi_status & SS_SENSE_LEN_VALID)
1602                         sense_len = le16_to_cpu(sts->req_sense_length);
1603                 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1604                         rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1605                 resid_len = le32_to_cpu(sts->residual_length);
1606                 rsp_info = sts->rsp_info;
1607                 sense_data = sts->req_sense_data;
1608                 par_sense_len = sizeof(sts->req_sense_data);
1609         }
1610
1611         /* Check for any FCP transport errors. */
1612         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1613                 /* Sense data lies beyond any FCP RESPONSE data. */
1614                 if (IS_FWI2_CAPABLE(ha)) {
1615                         sense_data += rsp_info_len;
1616                         par_sense_len -= rsp_info_len;
1617                 }
1618                 if (rsp_info_len > 3 && rsp_info[3]) {
1619                         DEBUG2(qla_printk(KERN_INFO, ha,
1620                             "scsi(%ld:%d:%d): FCP I/O protocol failure "
1621                             "(0x%x/0x%x).\n", vha->host_no, cp->device->id,
1622                             cp->device->lun, rsp_info_len, rsp_info[3]));
1623
1624                         cp->result = DID_BUS_BUSY << 16;
1625                         goto out;
1626                 }
1627         }
1628
1629         /* Check for overrun. */
1630         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1631             scsi_status & SS_RESIDUAL_OVER)
1632                 comp_status = CS_DATA_OVERRUN;
1633
1634         /*
1635          * Based on the host and SCSI status, generate the Linux status code.
1636          */
1637         switch (comp_status) {
1638         case CS_COMPLETE:
1639         case CS_QUEUE_FULL:
1640                 if (scsi_status == 0) {
1641                         cp->result = DID_OK << 16;
1642                         break;
1643                 }
1644                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1645                         resid = resid_len;
1646                         scsi_set_resid(cp, resid);
1647
1648                         if (!lscsi_status &&
1649                             ((unsigned)(scsi_bufflen(cp) - resid) <
1650                              cp->underflow)) {
1651                                 qla_printk(KERN_INFO, ha,
1652                                     "scsi(%ld:%d:%d): Mid-layer underflow "
1653                                     "detected (0x%x of 0x%x bytes).\n",
1654                                     vha->host_no, cp->device->id,
1655                                     cp->device->lun, resid, scsi_bufflen(cp));
1656
1657                                 cp->result = DID_ERROR << 16;
1658                                 break;
1659                         }
1660                 }
1661                 cp->result = DID_OK << 16 | lscsi_status;
1662
1663                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1664                         DEBUG2(qla_printk(KERN_INFO, ha,
1665                             "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1666                             vha->host_no, cp->device->id, cp->device->lun));
1667                         break;
1668                 }
1669                 logit = 0;
1670                 if (lscsi_status != SS_CHECK_CONDITION)
1671                         break;
1672
1673                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1674                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1675                         break;
1676
1677                 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1678                     rsp);
1679                 break;
1680
1681         case CS_DATA_UNDERRUN:
1682                 /* Use F/W calculated residual length. */
1683                 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
1684                 scsi_set_resid(cp, resid);
1685                 if (scsi_status & SS_RESIDUAL_UNDER) {
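                        /*
                         * If the FCP_RSP residual does not agree with the
                         * residual computed by the firmware, data frames were
                         * lost on the wire, so fail the command rather than
                         * completing it short.
                         */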
1686                         if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1687                                 DEBUG2(qla_printk(KERN_INFO, ha,
1688                                     "scsi(%ld:%d:%d) Dropped frame(s) detected "
1689                                     "(0x%x of 0x%x bytes).\n", vha->host_no,
1690                                     cp->device->id, cp->device->lun, resid,
1691                                     scsi_bufflen(cp)));
1692
1693                                 cp->result = DID_ERROR << 16 | lscsi_status;
1694                                 break;
1695                         }
1696
1697                         if (!lscsi_status &&
1698                             ((unsigned)(scsi_bufflen(cp) - resid) <
1699                             cp->underflow)) {
1700                                 qla_printk(KERN_INFO, ha,
1701                                     "scsi(%ld:%d:%d): Mid-layer underflow "
1702                                     "detected (0x%x of 0x%x bytes).\n",
1703                                     vha->host_no, cp->device->id,
1704                                     cp->device->lun, resid, scsi_bufflen(cp));
1705
1706                                 cp->result = DID_ERROR << 16;
1707                                 break;
1708                         }
1709                 } else if (!lscsi_status) {
1710                         DEBUG2(qla_printk(KERN_INFO, ha,
1711                             "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
1712                             "of 0x%x bytes).\n", vha->host_no, cp->device->id,
1713                             cp->device->lun, resid, scsi_bufflen(cp)));
1714
1715                         cp->result = DID_ERROR << 16;
1716                         break;
1717                 }
1718
1719                 cp->result = DID_OK << 16 | lscsi_status;
1720                 logit = 0;
1721
1722                 /*
1723                  * Check whether the SCSI status is non-zero; if so,
1724                  * report it.
1725                  */
1726                 if (lscsi_status != 0) {
1727                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1728                                 DEBUG2(qla_printk(KERN_INFO, ha,
1729                                     "scsi(%ld:%d:%d) QUEUE FULL detected.\n",
1730                                     vha->host_no, cp->device->id,
1731                                     cp->device->lun));
1732                                 logit = 1;
1733                                 break;
1734                         }
1735                         if (lscsi_status != SS_CHECK_CONDITION)
1736                                 break;
1737
1738                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1739                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1740                                 break;
1741
1742                         qla2x00_handle_sense(sp, sense_data, par_sense_len,
1743                             sense_len, rsp);
1744                 }
1745                 break;
1746
1747         case CS_PORT_LOGGED_OUT:
1748         case CS_PORT_CONFIG_CHG:
1749         case CS_PORT_BUSY:
1750         case CS_INCOMPLETE:
1751         case CS_PORT_UNAVAILABLE:
1752         case CS_TIMEOUT:
1753                 /*
1754                  * We are going to have the fc class block the rport
1755                  * while we try to recover so instruct the mid layer
1756                  * to requeue until the class decides how to handle this.
1757                  */
1758                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1759
1760                 if (comp_status == CS_TIMEOUT) {
1761                         if (IS_FWI2_CAPABLE(ha))
1762                                 break;
1763                         else if ((le16_to_cpu(sts->status_flags) &
1764                             SF_LOGOUT_SENT) == 0)
1765                                 break;
1766                 }
1767
1768                 DEBUG2(qla_printk(KERN_INFO, ha,
1769                         "scsi(%ld:%d:%d) Port down status: port-state=0x%x\n",
1770                         vha->host_no, cp->device->id, cp->device->lun,
1771                         atomic_read(&fcport->state)));
1772
1773                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1774                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1775                 break;
1776
1777         case CS_RESET:
1778                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1779                 break;
1780
1781         case CS_ABORTED:
1782                 cp->result = DID_RESET << 16;
1783                 break;
1784
1785         case CS_DIF_ERROR:
1786                 qla2x00_handle_dif_error(sp, sts24);
1787                 break;
1788         default:
1789                 cp->result = DID_ERROR << 16;
1790                 break;
1791         }
1792
1793 out:
1794         if (logit)
1795                 DEBUG2(qla_printk(KERN_INFO, ha,
1796                     "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
1797                     "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
1798                     "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
1799                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1800                     cp->result, ox_id, cp->serial_number, cp->cmnd[0],
1801                     cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
1802                     resid_len, fw_resid_len));
1803
1804         if (rsp->status_srb == NULL)
1805                 qla2x00_sp_compl(ha, sp);
1806 }
1807
1808 /**
1809  * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1810  * @rsp: response queue
1811  * @pkt: Entry pointer
1812  *
1813  * Extended sense data.
1814  */
1815 static void
1816 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1817 {
1818         uint8_t         sense_sz = 0;
1819         struct qla_hw_data *ha = rsp->hw;
1820         srb_t           *sp = rsp->status_srb;
1821         struct scsi_cmnd *cp;
1822
1823         if (sp != NULL && sp->request_sense_length != 0) {
1824                 cp = sp->cmd;
1825                 if (cp == NULL) {
1826                         DEBUG2(printk("%s(): Cmd already returned back to OS "
1827                             "sp=%p.\n", __func__, sp));
1828                         qla_printk(KERN_INFO, ha,
1829                             "cmd is NULL: already returned to OS (sp=%p)\n",
1830                             sp);
1831
1832                         rsp->status_srb = NULL;
1833                         return;
1834                 }
1835
1836                 if (sp->request_sense_length > sizeof(pkt->data)) {
1837                         sense_sz = sizeof(pkt->data);
1838                 } else {
1839                         sense_sz = sp->request_sense_length;
1840                 }
1841
1842                 /* Move sense data. */
1843                 if (IS_FWI2_CAPABLE(ha))
1844                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1845                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1846                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1847
1848                 sp->request_sense_ptr += sense_sz;
1849                 sp->request_sense_length -= sense_sz;
1850
1851                 /* Place command on done queue. */
1852                 if (sp->request_sense_length == 0) {
1853                         rsp->status_srb = NULL;
1854                         qla2x00_sp_compl(ha, sp);
1855                 }
1856         }
1857 }
1858
1859 /**
1860  * qla2x00_error_entry() - Process an error entry.
1861  * @vha: SCSI driver HA context
 * @rsp: response queue
1862  * @pkt: Entry pointer
1863  */
1864 static void
1865 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1866 {
1867         srb_t *sp;
1868         struct qla_hw_data *ha = vha->hw;
1869         uint32_t handle = LSW(pkt->handle);
1870         uint16_t que = MSW(pkt->handle);
1871         struct req_que *req = ha->req_q_map[que];
1872 #if defined(QL_DEBUG_LEVEL_2)
1873         if (pkt->entry_status & RF_INV_E_ORDER)
1874                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1875         else if (pkt->entry_status & RF_INV_E_COUNT)
1876                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1877         else if (pkt->entry_status & RF_INV_E_PARAM)
1878                 qla_printk(KERN_ERR, ha,
1879                     "%s: Invalid Entry Parameter\n", __func__);
1880         else if (pkt->entry_status & RF_INV_E_TYPE)
1881                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1882         else if (pkt->entry_status & RF_BUSY)
1883                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1884         else
1885                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1886 #endif
1887
1888         /* Validate handle. */
1889         if (handle < MAX_OUTSTANDING_COMMANDS)
1890                 sp = req->outstanding_cmds[handle];
1891         else
1892                 sp = NULL;
1893
1894         if (sp) {
1895                 /* Free outstanding command slot. */
1896                 req->outstanding_cmds[handle] = NULL;
1897
1898                 /* Bad payload or header */
1899                 if (pkt->entry_status &
1900                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1901                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1902                         sp->cmd->result = DID_ERROR << 16;
1903                 } else if (pkt->entry_status & RF_BUSY) {
1904                         sp->cmd->result = DID_BUS_BUSY << 16;
1905                 } else {
1906                         sp->cmd->result = DID_ERROR << 16;
1907                 }
1908                 qla2x00_sp_compl(ha, sp);
1909
1910         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1911             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1912                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1913                     vha->host_no));
1914                 qla_printk(KERN_WARNING, ha,
1915                     "Error entry - invalid handle\n");
1916
1917                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1918                 qla2xxx_wake_dpc(vha);
1919         }
1920 }
1921
1922 /**
1923  * qla24xx_mbx_completion() - Process mailbox command completions.
1924  * @vha: SCSI driver HA context
1925  * @mb0: Mailbox0 register
1926  */
1927 static void
1928 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1929 {
1930         uint16_t        cnt;
1931         uint16_t __iomem *wptr;
1932         struct qla_hw_data *ha = vha->hw;
1933         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1934
1935         /* Load return mailbox registers. */
1936         ha->flags.mbox_int = 1;
1937         ha->mailbox_out[0] = mb0;
1938         wptr = (uint16_t __iomem *)&reg->mailbox1;
1939
1940         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1941                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1942                 wptr++;
1943         }
1944
1945         if (ha->mcp) {
1946                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1947                     __func__, vha->host_no, ha->mcp->mb[0]));
1948         } else {
1949                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1950                     __func__, vha->host_no));
1951         }
1952 }
1953
1954 /**
1955  * qla24xx_process_response_queue() - Process response queue entries.
1956  * @vha: SCSI driver HA context
 * @rsp: response queue
1957  */
1958 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1959         struct rsp_que *rsp)
1960 {
1961         struct sts_entry_24xx *pkt;
1962         struct qla_hw_data *ha = vha->hw;
1963
1964         if (!vha->flags.online)
1965                 return;
1966
1967         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1968                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1969
1970                 rsp->ring_index++;
1971                 if (rsp->ring_index == rsp->length) {
1972                         rsp->ring_index = 0;
1973                         rsp->ring_ptr = rsp->ring;
1974                 } else {
1975                         rsp->ring_ptr++;
1976                 }
1977
1978                 if (pkt->entry_status != 0) {
1979                         DEBUG3(printk(KERN_INFO
1980                             "scsi(%ld): Process error entry.\n", vha->host_no));
1981
1982                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1983                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1984                         wmb();
1985                         continue;
1986                 }
1987
1988                 switch (pkt->entry_type) {
1989                 case STATUS_TYPE:
1990                         qla2x00_status_entry(vha, rsp, pkt);
1991                         break;
1992                 case STATUS_CONT_TYPE:
1993                         qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1994                         break;
1995                 case VP_RPT_ID_IOCB_TYPE:
1996                         qla24xx_report_id_acquisition(vha,
1997                             (struct vp_rpt_id_entry_24xx *)pkt);
1998                         break;
1999                 case LOGINOUT_PORT_IOCB_TYPE:
2000                         qla24xx_logio_entry(vha, rsp->req,
2001                             (struct logio_entry_24xx *)pkt);
2002                         break;
2003                 case TSK_MGMT_IOCB_TYPE:
2004                         qla24xx_tm_iocb_entry(vha, rsp->req,
2005                             (struct tsk_mgmt_entry *)pkt);
2006                         break;
2007                 case CT_IOCB_TYPE:
2008                         qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2009                         clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
2010                         break;
2011                 case ELS_IOCB_TYPE:
2012                         qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2013                         break;
2014                 default:
2015                         /* Type Not Supported. */
2016                         DEBUG4(printk(KERN_WARNING
2017                             "scsi(%ld): Received unknown response pkt type %x "
2018                             "entry status=%x.\n",
2019                             vha->host_no, pkt->entry_type, pkt->entry_status));
2020                         break;
2021                 }
2022                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2023                 wmb();
2024         }
2025
2026         /* Adjust ring index */
2027         if (IS_QLA82XX(ha)) {
2028                 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2029                 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2030         } else
2031                 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2032 }
2033
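/*
 * Invoked when the RISC reports itself paused: select the 0x7C00
 * diagnostic register window, poll the window-select handshake bit, and
 * print a notice when the additional-code bit (iobase_c8 BIT_3) is set.
 * Only ISP25xx and ISP81xx parts are probed; others return immediately.
 */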
2034 static void
2035 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2036 {
2037         int rval;
2038         uint32_t cnt;
2039         struct qla_hw_data *ha = vha->hw;
2040         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2041
2042         if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
2043                 return;
2044
2045         rval = QLA_SUCCESS;
2046         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2047         RD_REG_DWORD(&reg->iobase_addr);
2048         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2049         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2050             rval == QLA_SUCCESS; cnt--) {
2051                 if (cnt) {
2052                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2053                         udelay(10);
2054                 } else
2055                         rval = QLA_FUNCTION_TIMEOUT;
2056         }
2057         if (rval == QLA_SUCCESS)
2058                 goto next_test;
2059
2060         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2061         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2062             rval == QLA_SUCCESS; cnt--) {
2063                 if (cnt) {
2064                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2065                         udelay(10);
2066                 } else
2067                         rval = QLA_FUNCTION_TIMEOUT;
2068         }
2069         if (rval != QLA_SUCCESS)
2070                 goto done;
2071
2072 next_test:
2073         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2074                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
2075
2076 done:
2077         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2078         RD_REG_DWORD(&reg->iobase_window);
2079 }
2080
2081 /**
2082  * qla24xx_intr_handler() - Process interrupts for the ISP24xx and newer adapters.
2083  * @irq: interrupt number
2084  * @dev_id: SCSI driver HA context
2085  *
2086  * Called by system whenever the host adapter generates an interrupt.
2087  *
2088  * Returns handled flag.
2089  */
2090 irqreturn_t
2091 qla24xx_intr_handler(int irq, void *dev_id)
2092 {
2093         scsi_qla_host_t *vha;
2094         struct qla_hw_data *ha;
2095         struct device_reg_24xx __iomem *reg;
2096         int             status;
2097         unsigned long   iter;
2098         uint32_t        stat;
2099         uint32_t        hccr;
2100         uint16_t        mb[4];
2101         struct rsp_que *rsp;
2102         unsigned long   flags;
2103
2104         rsp = (struct rsp_que *) dev_id;
2105         if (!rsp) {
2106                 printk(KERN_INFO
2107                     "%s(): NULL response queue pointer\n", __func__);
2108                 return IRQ_NONE;
2109         }
2110
2111         ha = rsp->hw;
2112         reg = &ha->iobase->isp24;
2113         status = 0;
2114
2115         if (unlikely(pci_channel_offline(ha->pdev)))
2116                 return IRQ_HANDLED;
2117
2118         spin_lock_irqsave(&ha->hardware_lock, flags);
2119         vha = pci_get_drvdata(ha->pdev);
2120         for (iter = 50; iter--; ) {
2121                 stat = RD_REG_DWORD(&reg->host_status);
2122                 if (stat & HSRX_RISC_PAUSED) {
2123                         if (unlikely(pci_channel_offline(ha->pdev)))
2124                                 break;
2125
2126                         hccr = RD_REG_DWORD(&reg->hccr);
2127
2128                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2129                             "Dumping firmware!\n", hccr);
2130
2131                         qla2xxx_check_risc_status(vha);
2132
2133                         ha->isp_ops->fw_dump(vha, 1);
2134                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2135                         break;
2136                 } else if ((stat & HSRX_RISC_INT) == 0)
2137                         break;
2138
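                /*
                 * The low byte of host_status identifies the interrupt
                 * source: 0x1/0x2/0x10/0x11 mailbox command completion,
                 * 0x12 asynchronous event, 0x13/0x14 response-queue update.
                 */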
2139                 switch (stat & 0xff) {
2140                 case 0x1:
2141                 case 0x2:
2142                 case 0x10:
2143                 case 0x11:
2144                         qla24xx_mbx_completion(vha, MSW(stat));
2145                         status |= MBX_INTERRUPT;
2146
2147                         break;
2148                 case 0x12:
2149                         mb[0] = MSW(stat);
2150                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2151                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2152                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2153                         qla2x00_async_event(vha, rsp, mb);
2154                         break;
2155                 case 0x13:
2156                 case 0x14:
2157                         qla24xx_process_response_queue(vha, rsp);
2158                         break;
2159                 default:
2160                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2161                             "(%d).\n",
2162                             vha->host_no, stat & 0xff));
2163                         break;
2164                 }
2165                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2166                 RD_REG_DWORD_RELAXED(&reg->hccr);
2167         }
2168         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2169
2170         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2171             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2172                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2173                 complete(&ha->mbx_intr_comp);
2174         }
2175
2176         return IRQ_HANDLED;
2177 }
2178
2179 static irqreturn_t
2180 qla24xx_msix_rsp_q(int irq, void *dev_id)
2181 {
2182         struct qla_hw_data *ha;
2183         struct rsp_que *rsp;
2184         struct device_reg_24xx __iomem *reg;
2185         struct scsi_qla_host *vha;
2186         unsigned long flags;
2187
2188         rsp = (struct rsp_que *) dev_id;
2189         if (!rsp) {
2190                 printk(KERN_INFO
2191                 "%s(): NULL response queue pointer\n", __func__);
2192                 return IRQ_NONE;
2193         }
2194         ha = rsp->hw;
2195         reg = &ha->iobase->isp24;
2196
2197         spin_lock_irqsave(&ha->hardware_lock, flags);
2198
2199         vha = pci_get_drvdata(ha->pdev);
2200         qla24xx_process_response_queue(vha, rsp);
2201         if (!ha->flags.disable_msix_handshake) {
2202                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2203                 RD_REG_DWORD_RELAXED(&reg->hccr);
2204         }
2205         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2206
2207         return IRQ_HANDLED;
2208 }
2209
2210 static irqreturn_t
2211 qla25xx_msix_rsp_q(int irq, void *dev_id)
2212 {
2213         struct qla_hw_data *ha;
2214         struct rsp_que *rsp;
2215         struct device_reg_24xx __iomem *reg;
2216         unsigned long flags;
2217
2218         rsp = (struct rsp_que *) dev_id;
2219         if (!rsp) {
2220                 printk(KERN_INFO
2221                         "%s(): NULL response queue pointer\n", __func__);
2222                 return IRQ_NONE;
2223         }
2224         ha = rsp->hw;
2225
2226         /* Clear the interrupt, if enabled, for this response queue */
2227         if (rsp->options & ~BIT_6) {
2228                 reg = &ha->iobase->isp24;
2229                 spin_lock_irqsave(&ha->hardware_lock, flags);
2230                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2231                 RD_REG_DWORD_RELAXED(&reg->hccr);
2232                 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2233         }
2234         queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2235
2236         return IRQ_HANDLED;
2237 }
2238
2239 static irqreturn_t
2240 qla24xx_msix_default(int irq, void *dev_id)
2241 {
2242         scsi_qla_host_t *vha;
2243         struct qla_hw_data *ha;
2244         struct rsp_que *rsp;
2245         struct device_reg_24xx __iomem *reg;
2246         int             status;
2247         uint32_t        stat;
2248         uint32_t        hccr;
2249         uint16_t        mb[4];
2250         unsigned long flags;
2251
2252         rsp = (struct rsp_que *) dev_id;
2253         if (!rsp) {
2254                 DEBUG(printk(
2255                 "%s(): NULL response queue pointer\n", __func__));
2256                 return IRQ_NONE;
2257         }
2258         ha = rsp->hw;
2259         reg = &ha->iobase->isp24;
2260         status = 0;
2261
2262         spin_lock_irqsave(&ha->hardware_lock, flags);
2263         vha = pci_get_drvdata(ha->pdev);
2264         do {
2265                 stat = RD_REG_DWORD(&reg->host_status);
2266                 if (stat & HSRX_RISC_PAUSED) {
2267                         if (unlikely(pci_channel_offline(ha->pdev)))
2268                                 break;
2269
2270                         hccr = RD_REG_DWORD(&reg->hccr);
2271
2272                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
2273                             "Dumping firmware!\n", hccr);
2274
2275                         qla2xxx_check_risc_status(vha);
2276
2277                         ha->isp_ops->fw_dump(vha, 1);
2278                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2279                         break;
2280                 } else if ((stat & HSRX_RISC_INT) == 0)
2281                         break;
2282
2283                 switch (stat & 0xff) {
2284                 case 0x1:
2285                 case 0x2:
2286                 case 0x10:
2287                 case 0x11:
2288                         qla24xx_mbx_completion(vha, MSW(stat));
2289                         status |= MBX_INTERRUPT;
2290
2291                         break;
2292                 case 0x12:
2293                         mb[0] = MSW(stat);
2294                         mb[1] = RD_REG_WORD(&reg->mailbox1);
2295                         mb[2] = RD_REG_WORD(&reg->mailbox2);
2296                         mb[3] = RD_REG_WORD(&reg->mailbox3);
2297                         qla2x00_async_event(vha, rsp, mb);
2298                         break;
2299                 case 0x13:
2300                 case 0x14:
2301                         qla24xx_process_response_queue(vha, rsp);
2302                         break;
2303                 default:
2304                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2305                             "(%d).\n",
2306                             vha->host_no, stat & 0xff));
2307                         break;
2308                 }
2309                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2310         } while (0);
2311         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2312
2313         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2314             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2315                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2316                 complete(&ha->mbx_intr_comp);
2317         }
2318         return IRQ_HANDLED;
2319 }
2320
2321 /* Interrupt handling helpers. */
2322
2323 struct qla_init_msix_entry {
2324         const char *name;
2325         irq_handler_t handler;
2326 };
2327
2328 static struct qla_init_msix_entry msix_entries[3] = {
2329         { "qla2xxx (default)", qla24xx_msix_default },
2330         { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2331         { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2332 };
2333
2334 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2335         { "qla2xxx (default)", qla82xx_msix_default },
2336         { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2337 };
2338
2339 static void
2340 qla24xx_disable_msix(struct qla_hw_data *ha)
2341 {
2342         int i;
2343         struct qla_msix_entry *qentry;
2344
2345         for (i = 0; i < ha->msix_count; i++) {
2346                 qentry = &ha->msix_entries[i];
2347                 if (qentry->have_irq)
2348                         free_irq(qentry->vector, qentry->rsp);
2349         }
2350         pci_disable_msix(ha->pdev);
2351         kfree(ha->msix_entries);
2352         ha->msix_entries = NULL;
2353         ha->flags.msix_enabled = 0;
2354 }
2355
2356 static int
2357 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2358 {
2359 #define MIN_MSIX_COUNT  2
2360         int i, ret;
2361         struct msix_entry *entries;
2362         struct qla_msix_entry *qentry;
2363
2364         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2365                         GFP_KERNEL);
2366         if (!entries)
2367                 return -ENOMEM;
2368
2369         for (i = 0; i < ha->msix_count; i++)
2370                 entries[i].entry = i;
2371
2372         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
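        /*
         * With the legacy pci_enable_msix() API, 0 means every requested
         * vector was allocated, a negative value is an error, and a
         * positive value is the number of vectors actually available.
         * The retry below re-requests with that smaller count unless it
         * falls under the two-vector minimum.
         */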
2373         if (ret) {
2374                 if (ret < MIN_MSIX_COUNT)
2375                         goto msix_failed;
2376
2377                 qla_printk(KERN_WARNING, ha,
2378                         "MSI-X: Failed to enable support -- %d/%d\n"
2379                         " Retry with %d vectors\n", ha->msix_count, ret, ret);
2380                 ha->msix_count = ret;
2381                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2382                 if (ret) {
2383 msix_failed:
2384                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
2385                                 " support, giving up -- %d/%d\n",
2386                                 ha->msix_count, ret);
2387                         goto msix_out;
2388                 }
2389                 ha->max_rsp_queues = ha->msix_count - 1;
2390         }
2391         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2392                                 ha->msix_count, GFP_KERNEL);
2393         if (!ha->msix_entries) {
2394                 ret = -ENOMEM;
2395                 goto msix_out;
2396         }
2397         ha->flags.msix_enabled = 1;
2398
2399         for (i = 0; i < ha->msix_count; i++) {
2400                 qentry = &ha->msix_entries[i];
2401                 qentry->vector = entries[i].vector;
2402                 qentry->entry = entries[i].entry;
2403                 qentry->have_irq = 0;
2404                 qentry->rsp = NULL;
2405         }
2406
2407         /* Enable MSI-X vectors for the base queue */
2408         for (i = 0; i < 2; i++) {
2409                 qentry = &ha->msix_entries[i];
2410                 if (IS_QLA82XX(ha)) {
2411                         ret = request_irq(qentry->vector,
2412                                 qla82xx_msix_entries[i].handler,
2413                                 0, qla82xx_msix_entries[i].name, rsp);
2414                 } else {
2415                         ret = request_irq(qentry->vector,
2416                                 msix_entries[i].handler,
2417                                 0, msix_entries[i].name, rsp);
2418                 }
2419                 if (ret) {
2420                         qla_printk(KERN_WARNING, ha,
2421                         "MSI-X: Unable to register handler -- %x/%d.\n",
2422                         qentry->vector, ret);
2423                         qla24xx_disable_msix(ha);
2424                         ha->mqenable = 0;
2425                         goto msix_out;
2426                 }
2427                 qentry->have_irq = 1;
2428                 qentry->rsp = rsp;
2429                 rsp->msix = qentry;
2430         }
2431
2432         /* Enable multi-queue operation when extra request/response queues are configured. */
2433         if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2434                 ha->mqenable = 1;
2435
2436 msix_out:
2437         kfree(entries);
2438         return ret;
2439 }
2440
2441 int
2442 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2443 {
2444         int ret;
2445         device_reg_t __iomem *reg = ha->iobase;
2446
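        /*
         * Interrupt setup degrades gracefully: try MSI-X first, fall back
         * to MSI, and finally to a shared legacy INTx line.
         */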
2447         /* If possible, enable MSI-X. */
2448         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
2449                 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
2450                 goto skip_msi;
2451
2452         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2453                 (ha->pdev->subsystem_device == 0x7040 ||
2454                 ha->pdev->subsystem_device == 0x7041 ||
2455                 ha->pdev->subsystem_device == 0x1705)) {
2456                 DEBUG2(qla_printk(KERN_WARNING, ha,
2457                         "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
2458                         ha->pdev->subsystem_vendor,
2459                         ha->pdev->subsystem_device));
2460                 goto skip_msi;
2461         }
2462
2463         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
2464                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
2465                 DEBUG2(qla_printk(KERN_WARNING, ha,
2466                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2467                         ha->pdev->revision, ha->fw_attributes));
2468                 goto skip_msix;
2469         }
2470
2471         ret = qla24xx_enable_msix(ha, rsp);
2472         if (!ret) {
2473                 DEBUG2(qla_printk(KERN_INFO, ha,
2474                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
2475                     ha->fw_attributes));
2476                 goto clear_risc_ints;
2477         }
2478         qla_printk(KERN_WARNING, ha,
2479             "MSI-X: Falling back to MSI mode -- %d.\n", ret);
2480 skip_msix:
2481
2482         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2483             !IS_QLA8001(ha))
2484                 goto skip_msi;
2485
2486         ret = pci_enable_msi(ha->pdev);
2487         if (!ret) {
2488                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
2489                 ha->flags.msi_enabled = 1;
2490         } else
2491                 qla_printk(KERN_WARNING, ha,
2492                     "MSI: Falling back to INTa mode -- %d.\n", ret);
2493 skip_msi:
2494
2495         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2496             IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
2497         if (ret) {
2498                 qla_printk(KERN_WARNING, ha,
2499                     "Failed to reserve interrupt %d; already in use.\n",
2500                     ha->pdev->irq);
2501                 goto fail;
2502         }
2503         ha->flags.inta_enabled = 1;
2504 clear_risc_ints:
2505
2506         /*
2507          * FIXME: Noted that 8014s were being dropped during NK testing.
2508          * Timing deltas during MSI-X/INTa transitions?
2509          */
2510         if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
2511                 goto fail;
2512         spin_lock_irq(&ha->hardware_lock);
2513         if (IS_FWI2_CAPABLE(ha)) {
2514                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2515                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2516         } else {
2517                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2518                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2519                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2520         }
2521         spin_unlock_irq(&ha->hardware_lock);
2522
2523 fail:
2524         return ret;
2525 }
2526
2527 void
2528 qla2x00_free_irqs(scsi_qla_host_t *vha)
2529 {
2530         struct qla_hw_data *ha = vha->hw;
2531         struct rsp_que *rsp = ha->rsp_q_map[0];
2532
2533         if (ha->flags.msix_enabled)
2534                 qla24xx_disable_msix(ha);
2535         else if (ha->flags.msi_enabled) {
2536                 free_irq(ha->pdev->irq, rsp);
2537                 pci_disable_msi(ha->pdev);
2538         } else
2539                 free_irq(ha->pdev->irq, rsp);
2540 }
2541
2542
2543 int qla25xx_request_irq(struct rsp_que *rsp)
2544 {
2545         struct qla_hw_data *ha = rsp->hw;
2546         struct qla_init_msix_entry *intr = &msix_entries[2];
2547         struct qla_msix_entry *msix = rsp->msix;
2548         int ret;
2549
2550         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2551         if (ret) {
2552                 qla_printk(KERN_WARNING, ha,
2553                         "MSI-X: Unable to register handler -- %x/%d.\n",
2554                         msix->vector, ret);
2555                 return ret;
2556         }
2557         msix->have_irq = 1;
2558         msix->rsp = rsp;
2559         return ret;
2560 }