[net-next-2.6.git] / drivers/scsi/qla2xxx/qla_isr.c (blob eb4b43d7697f1c8c21f20d43fa0646fcea85b78b)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *,
14         struct req_que *, uint32_t);
15 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
16 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
17 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18         sts_entry_t *);
19 static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
20
21 /**
22  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
23  * @irq: interrupt number
24  * @dev_id: response queue pointer
25  *
26  * Called by system whenever the host adapter generates an interrupt.
27  *
28  * Returns handled flag.
29  */
30 irqreturn_t
31 qla2100_intr_handler(int irq, void *dev_id)
32 {
33         scsi_qla_host_t *vha;
34         struct qla_hw_data *ha;
35         struct device_reg_2xxx __iomem *reg;
36         int             status;
37         unsigned long   iter;
38         uint16_t        hccr;
39         uint16_t        mb[4];
40         struct rsp_que *rsp;
41
42         rsp = (struct rsp_que *) dev_id;
43         if (!rsp) {
44                 printk(KERN_INFO
45                     "%s(): NULL response queue pointer\n", __func__);
46                 return (IRQ_NONE);
47         }
48
49         ha = rsp->hw;
50         reg = &ha->iobase->isp;
51         status = 0;
52
53         spin_lock(&ha->hardware_lock);
54         vha = qla2x00_get_rsp_host(rsp);
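        /*
         * Bounded polling loop: service at most 50 interrupt conditions per
         * invocation so the handler does not spin indefinitely in hard-IRQ
         * context if the ISP keeps reasserting its interrupt.
         */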
55         for (iter = 50; iter--; ) {
56                 hccr = RD_REG_WORD(&reg->hccr);
57                 if (hccr & HCCR_RISC_PAUSE) {
58                         if (pci_channel_offline(ha->pdev))
59                                 break;
60
61                         /*
62                          * Issue a "HARD" reset in order for the RISC interrupt
63                          * bit to be cleared.  Schedule a big hammer to get
64                          * out of the RISC PAUSED state.
65                          */
66                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67                         RD_REG_WORD(&reg->hccr);
68
69                         ha->isp_ops->fw_dump(vha, 1);
70                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
71                         break;
72                 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
73                         break;
74
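                /*
                 * When the mailbox semaphore (BIT_0) is held by the RISC, the
                 * interrupt carries mailbox data (a command completion status
                 * or an asynchronous event); otherwise it signals new
                 * response queue entries to process.
                 */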
75                 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77                         RD_REG_WORD(&reg->hccr);
78
79                         /* Get mailbox data. */
80                         mb[0] = RD_MAILBOX_REG(ha, reg, 0);
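                        /*
                         * mb[0] in the 0x4000-0x7fff range holds a mailbox
                         * command completion status (MBS_*); values in the
                         * 0x8000-0xbfff range are asynchronous event codes
                         * (MBA_*).  Anything else is treated as an
                         * unrecognized interrupt type below.
                         */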
81                         if (mb[0] > 0x3fff && mb[0] < 0x8000) {
82                                 qla2x00_mbx_completion(vha, mb[0]);
83                                 status |= MBX_INTERRUPT;
84                         } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85                                 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86                                 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87                                 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
88                                 qla2x00_async_event(vha, rsp, mb);
89                         } else {
90                                 /*EMPTY*/
91                                 DEBUG2(printk("scsi(%ld): Unrecognized "
92                                     "interrupt type (%d).\n",
93                                     vha->host_no, mb[0]));
94                         }
95                         /* Release mailbox registers. */
96                         WRT_REG_WORD(&reg->semaphore, 0);
97                         RD_REG_WORD(&reg->semaphore);
98                 } else {
99                         qla2x00_process_response_queue(rsp);
100
101                         WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102                         RD_REG_WORD(&reg->hccr);
103                 }
104         }
105         spin_unlock(&ha->hardware_lock);
106
107         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
109                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
110                 complete(&ha->mbx_intr_comp);
111         }
112
113         return (IRQ_HANDLED);
114 }
115
116 /**
117  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118  * @irq: interrupt number
119  * @dev_id: response queue pointer
120  *
121  * Called by system whenever the host adapter generates an interrupt.
122  *
123  * Returns handled flag.
124  */
125 irqreturn_t
126 qla2300_intr_handler(int irq, void *dev_id)
127 {
128         scsi_qla_host_t *vha;
129         struct device_reg_2xxx __iomem *reg;
130         int             status;
131         unsigned long   iter;
132         uint32_t        stat;
133         uint16_t        hccr;
134         uint16_t        mb[4];
135         struct rsp_que *rsp;
136         struct qla_hw_data *ha;
137
138         rsp = (struct rsp_que *) dev_id;
139         if (!rsp) {
140                 printk(KERN_INFO
141                     "%s(): NULL response queue pointer\n", __func__);
142                 return (IRQ_NONE);
143         }
144
145         ha = rsp->hw;
146         reg = &ha->iobase->isp;
147         status = 0;
148
149         spin_lock(&ha->hardware_lock);
150         vha = qla2x00_get_rsp_host(rsp);
151         for (iter = 50; iter--; ) {
152                 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153                 if (stat & HSR_RISC_PAUSED) {
154                         if (pci_channel_offline(ha->pdev))
155                                 break;
156
157                         hccr = RD_REG_WORD(&reg->hccr);
158                         if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
159                                 qla_printk(KERN_INFO, ha, "Parity error -- "
160                                     "HCCR=%x, Dumping firmware!\n", hccr);
161                         else
162                                 qla_printk(KERN_INFO, ha, "RISC paused -- "
163                                     "HCCR=%x, Dumping firmware!\n", hccr);
164
165                         /*
166                          * Issue a "HARD" reset in order for the RISC
167                          * interrupt bit to be cleared.  Schedule a big
168                          * hammer to get out of the RISC PAUSED state.
169                          */
170                         WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171                         RD_REG_WORD(&reg->hccr);
172
173                         ha->isp_ops->fw_dump(vha, 1);
174                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175                         break;
176                 } else if ((stat & HSR_RISC_INT) == 0)
177                         break;
178
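                /*
                 * The low byte of host_status identifies why the RISC raised
                 * the interrupt: 0x01/0x02/0x10/0x11 are mailbox command
                 * completions, 0x12 is an asynchronous event whose data is
                 * read from the mailbox registers, 0x13 means new response
                 * queue entries, and 0x15/0x16 are fast-post completions
                 * whose data is packed into the upper word of host_status.
                 */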
179                 switch (stat & 0xff) {
180                 case 0x1:
181                 case 0x2:
182                 case 0x10:
183                 case 0x11:
184                         qla2x00_mbx_completion(vha, MSW(stat));
185                         status |= MBX_INTERRUPT;
186
187                         /* Release mailbox registers. */
188                         WRT_REG_WORD(&reg->semaphore, 0);
189                         break;
190                 case 0x12:
191                         mb[0] = MSW(stat);
192                         mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194                         mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195                         qla2x00_async_event(vha, rsp, mb);
196                         break;
197                 case 0x13:
198                         qla2x00_process_response_queue(rsp);
199                         break;
200                 case 0x15:
201                         mb[0] = MBA_CMPLT_1_16BIT;
202                         mb[1] = MSW(stat);
203                         qla2x00_async_event(vha, rsp, mb);
204                         break;
205                 case 0x16:
206                         mb[0] = MBA_SCSI_COMPLETION;
207                         mb[1] = MSW(stat);
208                         mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209                         qla2x00_async_event(vha, rsp, mb);
210                         break;
211                 default:
212                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
213                             "(%d).\n",
214                             vha->host_no, stat & 0xff));
215                         break;
216                 }
217                 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218                 RD_REG_WORD_RELAXED(&reg->hccr);
219         }
220         spin_unlock(&ha->hardware_lock);
221
222         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
224                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
225                 complete(&ha->mbx_intr_comp);
226         }
227
228         return (IRQ_HANDLED);
229 }
230
231 /**
232  * qla2x00_mbx_completion() - Process mailbox command completions.
233  * @vha: SCSI driver HA context
234  * @mb0: Mailbox0 register
235  */
236 static void
237 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
238 {
239         uint16_t        cnt;
240         uint16_t __iomem *wptr;
241         struct qla_hw_data *ha = vha->hw;
242         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
243
244         /* Load return mailbox registers. */
245         ha->flags.mbox_int = 1;
246         ha->mailbox_out[0] = mb0;
247         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
248
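        /*
         * Read the remaining return mailbox registers.  On the ISP2200 the
         * upper mailbox registers (8 and above) are mapped at a different
         * offset, so the walking pointer is re-seeded at cnt == 8.
         * Registers 4 and 5 are read through qla2x00_debounce_register(),
         * which re-reads until two consecutive reads agree.
         */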
249         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
250                 if (IS_QLA2200(ha) && cnt == 8)
251                         wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
252                 if (cnt == 4 || cnt == 5)
253                         ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
254                 else
255                         ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
256
257                 wptr++;
258         }
259
260         if (ha->mcp) {
261                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
262                     __func__, vha->host_no, ha->mcp->mb[0]));
263         } else {
264                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
265                     __func__, vha->host_no));
266         }
267 }
268
269 /**
270  * qla2x00_async_event() - Process asynchronous events.
271  * @vha: SCSI driver HA context
272  * @mb: Mailbox registers (0 - 3)
273  */
274 void
275 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
276 {
277 #define LS_UNKNOWN      2
278         static char     *link_speeds[5] = { "1", "2", "?", "4", "8" };
279         char            *link_speed;
280         uint16_t        handle_cnt;
281         uint16_t        cnt;
282         uint32_t        handles[5];
283         struct qla_hw_data *ha = vha->hw;
284         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
285         uint32_t        rscn_entry, host_pid;
286         uint8_t         rscn_queue_index;
287         unsigned long   flags;
288
289         /* Setup to process RIO completion. */
290         handle_cnt = 0;
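        /*
         * The MBA_CMPLT_* fast-post events report one to five completed
         * command handles directly in the mailbox registers (e.g.
         * MBA_CMPLT_2_16BIT carries handles in mb[1] and mb[2]).  Collect
         * the handles and rewrite mb[0] to MBA_SCSI_COMPLETION so the
         * switch below completes them all through the same path.
         */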
291         switch (mb[0]) {
292         case MBA_SCSI_COMPLETION:
293                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
294                 handle_cnt = 1;
295                 break;
296         case MBA_CMPLT_1_16BIT:
297                 handles[0] = mb[1];
298                 handle_cnt = 1;
299                 mb[0] = MBA_SCSI_COMPLETION;
300                 break;
301         case MBA_CMPLT_2_16BIT:
302                 handles[0] = mb[1];
303                 handles[1] = mb[2];
304                 handle_cnt = 2;
305                 mb[0] = MBA_SCSI_COMPLETION;
306                 break;
307         case MBA_CMPLT_3_16BIT:
308                 handles[0] = mb[1];
309                 handles[1] = mb[2];
310                 handles[2] = mb[3];
311                 handle_cnt = 3;
312                 mb[0] = MBA_SCSI_COMPLETION;
313                 break;
314         case MBA_CMPLT_4_16BIT:
315                 handles[0] = mb[1];
316                 handles[1] = mb[2];
317                 handles[2] = mb[3];
318                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
319                 handle_cnt = 4;
320                 mb[0] = MBA_SCSI_COMPLETION;
321                 break;
322         case MBA_CMPLT_5_16BIT:
323                 handles[0] = mb[1];
324                 handles[1] = mb[2];
325                 handles[2] = mb[3];
326                 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
327                 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
328                 handle_cnt = 5;
329                 mb[0] = MBA_SCSI_COMPLETION;
330                 break;
331         case MBA_CMPLT_2_32BIT:
332                 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
333                 handles[1] = le32_to_cpu(
334                     ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
335                     RD_MAILBOX_REG(ha, reg, 6));
336                 handle_cnt = 2;
337                 mb[0] = MBA_SCSI_COMPLETION;
338                 break;
339         default:
340                 break;
341         }
342
343         switch (mb[0]) {
344         case MBA_SCSI_COMPLETION:       /* Fast Post */
345                 if (!vha->flags.online)
346                         break;
347
348                 for (cnt = 0; cnt < handle_cnt; cnt++)
349                         qla2x00_process_completed_request(vha, rsp->req,
350                                 handles[cnt]);
351                 break;
352
353         case MBA_RESET:                 /* Reset */
354                 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
355                         vha->host_no));
356
357                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
358                 break;
359
360         case MBA_SYSTEM_ERR:            /* System Error */
361                 qla_printk(KERN_INFO, ha,
362                     "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
363                     mb[1], mb[2], mb[3]);
364
365                 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
366                 ha->isp_ops->fw_dump(vha, 1);
367
368                 if (IS_FWI2_CAPABLE(ha)) {
369                         if (mb[1] == 0 && mb[2] == 0) {
370                                 qla_printk(KERN_ERR, ha,
371                                     "Unrecoverable Hardware Error: adapter "
372                                     "marked OFFLINE!\n");
373                                 vha->flags.online = 0;
374                         } else
375                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
376                 } else if (mb[1] == 0) {
377                         qla_printk(KERN_INFO, ha,
378                             "Unrecoverable Hardware Error: adapter marked "
379                             "OFFLINE!\n");
380                         vha->flags.online = 0;
381                 } else
382                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
383                 break;
384
385         case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
386                 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
387                     vha->host_no));
388                 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
389
390                 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
391                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
392                 break;
393
394         case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
395                 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
396                     vha->host_no));
397                 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
398
399                 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
400                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
401                 break;
402
403         case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
404                 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
405                     vha->host_no));
406                 break;
407
408         case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
409                 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
410                     mb[1]));
411                 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
412
413                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
414                         atomic_set(&vha->loop_state, LOOP_DOWN);
415                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
416                         qla2x00_mark_all_devices_lost(vha, 1);
417                 }
418
419                 if (vha->vp_idx) {
420                         atomic_set(&vha->vp_state, VP_FAILED);
421                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
422                 }
423
424                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
425                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
426
427                 vha->flags.management_server_logged_in = 0;
428                 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
429                 break;
430
431         case MBA_LOOP_UP:               /* Loop Up Event */
432                 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
433                         link_speed = link_speeds[0];
434                         ha->link_data_rate = PORT_SPEED_1GB;
435                 } else {
436                         link_speed = link_speeds[LS_UNKNOWN];
437                         if (mb[1] < 5)
438                                 link_speed = link_speeds[mb[1]];
439                         ha->link_data_rate = mb[1];
440                 }
441
442                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
443                     vha->host_no, link_speed));
444                 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
445                     link_speed);
446
447                 vha->flags.management_server_logged_in = 0;
448                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
449                 break;
450
451         case MBA_LOOP_DOWN:             /* Loop Down Event */
452                 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
453                     "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
454                 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
455                     mb[1], mb[2], mb[3]);
456
457                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
458                         atomic_set(&vha->loop_state, LOOP_DOWN);
459                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
460                         vha->device_flags |= DFLG_NO_CABLE;
461                         qla2x00_mark_all_devices_lost(vha, 1);
462                 }
463
464                 if (vha->vp_idx) {
465                         atomic_set(&vha->vp_state, VP_FAILED);
466                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
467                 }
468
469                 vha->flags.management_server_logged_in = 0;
470                 ha->link_data_rate = PORT_SPEED_UNKNOWN;
471                 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
472                 break;
473
474         case MBA_LIP_RESET:             /* LIP reset occurred */
475                 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
476                     vha->host_no, mb[1]));
477                 qla_printk(KERN_INFO, ha,
478                     "LIP reset occurred (%x).\n", mb[1]);
479
480                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
481                         atomic_set(&vha->loop_state, LOOP_DOWN);
482                         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
483                         qla2x00_mark_all_devices_lost(vha, 1);
484                 }
485
486                 if (vha->vp_idx) {
487                         atomic_set(&vha->vp_state, VP_FAILED);
488                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
489                 }
490
491                 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
492
493                 ha->operating_mode = LOOP;
494                 vha->flags.management_server_logged_in = 0;
495                 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
496                 break;
497
498         case MBA_POINT_TO_POINT:        /* Point-to-Point */
499                 if (IS_QLA2100(ha))
500                         break;
501
502                 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
503                     vha->host_no));
504
505                 /*
506                  * Until there's a transition from loop down to loop up, treat
507                  * this as loop down only.
508                  */
509                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
510                         atomic_set(&vha->loop_state, LOOP_DOWN);
511                         if (!atomic_read(&vha->loop_down_timer))
512                                 atomic_set(&vha->loop_down_timer,
513                                     LOOP_DOWN_TIME);
514                         qla2x00_mark_all_devices_lost(vha, 1);
515                 }
516
517                 if (vha->vp_idx) {
518                         atomic_set(&vha->vp_state, VP_FAILED);
519                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
520                 }
521
522                 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
523                         set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
524
525                 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
526                 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
527
528                 ha->flags.gpsc_supported = 1;
529                 vha->flags.management_server_logged_in = 0;
530                 break;
531
532         case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
533                 if (IS_QLA2100(ha))
534                         break;
535
536                 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
537                     "received.\n",
538                     vha->host_no));
539                 qla_printk(KERN_INFO, ha,
540                     "Configuration change detected: value=%x.\n", mb[1]);
541
542                 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
543                         atomic_set(&vha->loop_state, LOOP_DOWN);
544                         if (!atomic_read(&vha->loop_down_timer))
545                                 atomic_set(&vha->loop_down_timer,
546                                     LOOP_DOWN_TIME);
547                         qla2x00_mark_all_devices_lost(vha, 1);
548                 }
549
550                 if (vha->vp_idx) {
551                         atomic_set(&vha->vp_state, VP_FAILED);
552                         fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
553                 }
554
555                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
556                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
557                 break;
558
559         case MBA_PORT_UPDATE:           /* Port database update */
560                 /* Only handle SCNs for our Vport index. */
561                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
562                         break;
563
564                 /*
565                  * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
566                  * event etc. earlier indicating loop is down) then process
567                  * it.  Otherwise ignore it and wait for an RSCN to come in.
568                  */
569                 atomic_set(&vha->loop_down_timer, 0);
570                 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
571                     atomic_read(&vha->loop_state) != LOOP_DEAD) {
572                         DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
573                             "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
574                             mb[2], mb[3]));
575                         break;
576                 }
577
578                 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
579                     vha->host_no));
580                 DEBUG(printk(KERN_INFO
581                     "scsi(%ld): Port database changed %04x %04x %04x.\n",
582                     vha->host_no, mb[1], mb[2], mb[3]));
583
584                 /*
585                  * Mark all devices as missing so we will login again.
586                  */
587                 atomic_set(&vha->loop_state, LOOP_UP);
588
589                 qla2x00_mark_all_devices_lost(vha, 1);
590
591                 vha->flags.rscn_queue_overflow = 1;
592
593                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
594                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
595                 break;
596
597         case MBA_RSCN_UPDATE:           /* State Change Registration */
598                 /* Check if the Vport has issued a SCR */
599                 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
600                         break;
601                 /* Only handle SCNs for our Vport index. */
602                 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
603                         break;
604                 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
605                     vha->host_no));
606                 DEBUG(printk(KERN_INFO
607                     "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
608                     vha->host_no, mb[1], mb[2], mb[3]));
609
610                 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
611                 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
612                                 | vha->d_id.b.al_pa;
613                 if (rscn_entry == host_pid) {
614                         DEBUG(printk(KERN_INFO
615                             "scsi(%ld): Ignoring RSCN update to local host "
616                             "port ID (%06x)\n",
617                             vha->host_no, host_pid));
618                         break;
619                 }
620
621                 /* Ignore reserved bits from RSCN-payload. */
622                 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
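                /*
                 * rscn_queue is a small ring buffer: rscn_in_ptr is the
                 * producer index and rscn_out_ptr the consumer index.  If
                 * advancing the producer would collide with the consumer,
                 * the entry is dropped and rscn_queue_overflow is set (as in
                 * the PORT_UPDATE path above) so that a later resync can
                 * recover whatever notifications were lost.
                 */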
623                 rscn_queue_index = vha->rscn_in_ptr + 1;
624                 if (rscn_queue_index == MAX_RSCN_COUNT)
625                         rscn_queue_index = 0;
626                 if (rscn_queue_index != vha->rscn_out_ptr) {
627                         vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
628                         vha->rscn_in_ptr = rscn_queue_index;
629                 } else {
630                         vha->flags.rscn_queue_overflow = 1;
631                 }
632
633                 atomic_set(&vha->loop_state, LOOP_UPDATE);
634                 atomic_set(&vha->loop_down_timer, 0);
635                 vha->flags.management_server_logged_in = 0;
636
637                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
638                 set_bit(RSCN_UPDATE, &vha->dpc_flags);
639                 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
640                 break;
641
642         /* case MBA_RIO_RESPONSE: */
643         case MBA_ZIO_RESPONSE:
644                 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
645                     vha->host_no));
646                 DEBUG(printk(KERN_INFO
647                     "scsi(%ld): [R|Z]IO update completion.\n",
648                     vha->host_no));
649
650                 if (IS_FWI2_CAPABLE(ha))
651                         qla24xx_process_response_queue(rsp);
652                 else
653                         qla2x00_process_response_queue(rsp);
654                 break;
655
656         case MBA_DISCARD_RND_FRAME:
657                 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
658                     "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
659                 break;
660
661         case MBA_TRACE_NOTIFICATION:
662                 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
663                     vha->host_no, mb[1], mb[2]));
664                 break;
665
666         case MBA_ISP84XX_ALERT:
667                 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
668                     "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
669
670                 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
671                 switch (mb[1]) {
672                 case A84_PANIC_RECOVERY:
673                         qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
674                             "%04x %04x\n", mb[2], mb[3]);
675                         break;
676                 case A84_OP_LOGIN_COMPLETE:
677                         ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
678                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
679                             "firmware version %x\n", ha->cs84xx->op_fw_version));
680                         break;
681                 case A84_DIAG_LOGIN_COMPLETE:
682                         ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
683                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
684                             "diagnostic firmware version %x\n",
685                             ha->cs84xx->diag_fw_version));
686                         break;
687                 case A84_GOLD_LOGIN_COMPLETE:
688                         ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
689                         ha->cs84xx->fw_update = 1;
690                         DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
691                             "firmware version %x\n",
692                             ha->cs84xx->gold_fw_version));
693                         break;
694                 default:
695                         qla_printk(KERN_ERR, ha,
696                             "Alert 84xx: Invalid Alert %04x %04x %04x\n",
697                             mb[1], mb[2], mb[3]);
698                 }
699                 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
700                 break;
701         }
702
703         if (!vha->vp_idx && ha->num_vhosts)
704                 qla2x00_alert_all_vps(rsp, mb);
705 }
706
707 static void
708 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
709 {
710         fc_port_t *fcport = data;
711         struct scsi_qla_host *vha = fcport->vha;
712         struct qla_hw_data *ha = vha->hw;
713         struct req_que *req = NULL;
714
715         req = ha->req_q_map[vha->req_ques[0]];
716         if (!req)
717                 return;
718         if (req->max_q_depth <= sdev->queue_depth)
719                 return;
720
721         if (sdev->ordered_tags)
722                 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
723                     sdev->queue_depth + 1);
724         else
725                 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
726                     sdev->queue_depth + 1);
727
728         fcport->last_ramp_up = jiffies;
729
730         DEBUG2(qla_printk(KERN_INFO, ha,
731             "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
732             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
733             sdev->queue_depth));
734 }
735
736 static void
737 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
738 {
739         fc_port_t *fcport = data;
740
741         if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
742                 return;
743
744         DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
745             "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
746             fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
747             sdev->queue_depth));
748 }
749
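/*
 * Ramp the queue depth back up one step at a time, and only after
 * ql2xqfullrampup seconds have elapsed since both the last ramp-up and the
 * last QUEUE FULL condition seen on the port.
 */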
750 static inline void
751 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
752                                                                 srb_t *sp)
753 {
754         fc_port_t *fcport;
755         struct scsi_device *sdev;
756
757         sdev = sp->cmd->device;
758         if (sdev->queue_depth >= req->max_q_depth)
759                 return;
760
761         fcport = sp->fcport;
762         if (time_before(jiffies,
763             fcport->last_ramp_up + ql2xqfullrampup * HZ))
764                 return;
765         if (time_before(jiffies,
766             fcport->last_queue_full + ql2xqfullrampup * HZ))
767                 return;
768
769         starget_for_each_device(sdev->sdev_target, fcport,
770             qla2x00_adjust_sdev_qdepth_up);
771 }
772
773 /**
774  * qla2x00_process_completed_request() - Process a Fast Post response.
775  * @vha: SCSI driver HA context
776  * @index: SRB index
777  */
778 static void
779 qla2x00_process_completed_request(struct scsi_qla_host *vha,
780                                 struct req_que *req, uint32_t index)
781 {
782         srb_t *sp;
783         struct qla_hw_data *ha = vha->hw;
784
785         /* Validate handle. */
786         if (index >= MAX_OUTSTANDING_COMMANDS) {
787                 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
788                     vha->host_no, index));
789                 qla_printk(KERN_WARNING, ha,
790                     "Invalid SCSI completion handle %d.\n", index);
791
792                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
793                 return;
794         }
795
796         sp = req->outstanding_cmds[index];
797         if (sp) {
798                 /* Free outstanding command slot. */
799                 req->outstanding_cmds[index] = NULL;
800
801                 CMD_COMPL_STATUS(sp->cmd) = 0L;
802                 CMD_SCSI_STATUS(sp->cmd) = 0L;
803
804                 /* Save ISP completion status */
805                 sp->cmd->result = DID_OK << 16;
806
807                 qla2x00_ramp_up_queue_depth(vha, req, sp);
808                 qla2x00_sp_compl(ha, sp);
809         } else {
810                 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
811                     vha->host_no));
812                 qla_printk(KERN_WARNING, ha,
813                     "Invalid ISP SCSI completion handle\n");
814
815                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
816         }
817 }
818
819 /**
820  * qla2x00_process_response_queue() - Process response queue entries.
821  * @rsp: response queue pointer
822  */
823 void
824 qla2x00_process_response_queue(struct rsp_que *rsp)
825 {
826         struct scsi_qla_host *vha;
827         struct qla_hw_data *ha = rsp->hw;
828         struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
829         sts_entry_t     *pkt;
830         uint16_t        handle_cnt;
831         uint16_t        cnt;
832
833         vha = qla2x00_get_rsp_host(rsp);
834
835         if (!vha->flags.online)
836                 return;
837
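        /*
         * Walk the response ring until an entry still stamped
         * RESPONSE_PROCESSED is found; the firmware overwrites that
         * signature when it posts a new entry, and the driver writes it
         * back after consuming one.  The updated ring index is published
         * to the ISP via the response-queue out pointer at the end.
         */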
838         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
839                 pkt = (sts_entry_t *)rsp->ring_ptr;
840
841                 rsp->ring_index++;
842                 if (rsp->ring_index == rsp->length) {
843                         rsp->ring_index = 0;
844                         rsp->ring_ptr = rsp->ring;
845                 } else {
846                         rsp->ring_ptr++;
847                 }
848
849                 if (pkt->entry_status != 0) {
850                         DEBUG3(printk(KERN_INFO
851                             "scsi(%ld): Process error entry.\n", vha->host_no));
852
853                         qla2x00_error_entry(vha, rsp, pkt);
854                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
855                         wmb();
856                         continue;
857                 }
858
859                 switch (pkt->entry_type) {
860                 case STATUS_TYPE:
861                         qla2x00_status_entry(vha, rsp, pkt);
862                         break;
863                 case STATUS_TYPE_21:
864                         handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
865                         for (cnt = 0; cnt < handle_cnt; cnt++) {
866                                 qla2x00_process_completed_request(vha, rsp->req,
867                                     ((sts21_entry_t *)pkt)->handle[cnt]);
868                         }
869                         break;
870                 case STATUS_TYPE_22:
871                         handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
872                         for (cnt = 0; cnt < handle_cnt; cnt++) {
873                                 qla2x00_process_completed_request(vha, rsp->req,
874                                     ((sts22_entry_t *)pkt)->handle[cnt]);
875                         }
876                         break;
877                 case STATUS_CONT_TYPE:
878                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
879                         break;
880                 default:
881                         /* Type Not Supported. */
882                         DEBUG4(printk(KERN_WARNING
883                             "scsi(%ld): Received unknown response pkt type %x "
884                             "entry status=%x.\n",
885                             vha->host_no, pkt->entry_type, pkt->entry_status));
886                         break;
887                 }
888                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
889                 wmb();
890         }
891
892         /* Adjust ring index */
893         WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
894 }
895
896 static inline void
897 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
898 {
899         struct scsi_cmnd *cp = sp->cmd;
900
901         if (sense_len >= SCSI_SENSE_BUFFERSIZE)
902                 sense_len = SCSI_SENSE_BUFFERSIZE;
903
904         CMD_ACTUAL_SNSLEN(cp) = sense_len;
905         sp->request_sense_length = sense_len;
906         sp->request_sense_ptr = cp->sense_buffer;
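        /*
         * Only the first 32 bytes of sense data travel in the status IOCB
         * itself; if more was returned, request_sense_ptr/length track the
         * unfilled tail and vha->status_srb is set below so the remainder
         * can be copied from status continuation entries in
         * qla2x00_status_cont_entry().
         */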
907         if (sp->request_sense_length > 32)
908                 sense_len = 32;
909
910         memcpy(cp->sense_buffer, sense_data, sense_len);
911
912         sp->request_sense_ptr += sense_len;
913         sp->request_sense_length -= sense_len;
914         if (sp->request_sense_length != 0)
915                 sp->fcport->vha->status_srb = sp;
916
917         DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
918             "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
919             cp->device->channel, cp->device->id, cp->device->lun, cp,
920             cp->serial_number));
921         if (sense_len)
922                 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
923                     CMD_ACTUAL_SNSLEN(cp)));
924 }
925
926 /**
927  * qla2x00_status_entry() - Process a Status IOCB entry.
928  * @vha: SCSI driver HA context
929  * @pkt: Entry pointer
930  */
931 static void
932 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
933 {
934         srb_t           *sp;
935         fc_port_t       *fcport;
936         struct scsi_cmnd *cp;
937         sts_entry_t *sts;
938         struct sts_entry_24xx *sts24;
939         uint16_t        comp_status;
940         uint16_t        scsi_status;
941         uint8_t         lscsi_status;
942         int32_t         resid;
943         uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
944         uint8_t         *rsp_info, *sense_data;
945         struct qla_hw_data *ha = vha->hw;
946         struct req_que *req = rsp->req;
947
948         sts = (sts_entry_t *) pkt;
949         sts24 = (struct sts_entry_24xx *) pkt;
950         if (IS_FWI2_CAPABLE(ha)) {
951                 comp_status = le16_to_cpu(sts24->comp_status);
952                 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
953         } else {
954                 comp_status = le16_to_cpu(sts->comp_status);
955                 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
956         }
957
958         /* Fast path completion. */
959         if (comp_status == CS_COMPLETE && scsi_status == 0) {
960                 qla2x00_process_completed_request(vha, req, sts->handle);
961
962                 return;
963         }
964
965         /* Validate handle. */
966         if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
967                 sp = req->outstanding_cmds[sts->handle];
968                 req->outstanding_cmds[sts->handle] = NULL;
969         } else
970                 sp = NULL;
971
972         if (sp == NULL) {
973                 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
974                     vha->host_no));
975                 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
976
977                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
978                 qla2xxx_wake_dpc(vha);
979                 return;
980         }
981         cp = sp->cmd;
982         if (cp == NULL) {
983                 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
984                     "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
985                 qla_printk(KERN_WARNING, ha,
986                     "Command is NULL: already returned to OS (sp=%p)\n", sp);
987
988                 return;
989         }
990
991         lscsi_status = scsi_status & STATUS_MASK;
992         CMD_ENTRY_STATUS(cp) = sts->entry_status;
993         CMD_COMPL_STATUS(cp) = comp_status;
994         CMD_SCSI_STATUS(cp) = scsi_status;
995
996         fcport = sp->fcport;
997
998         sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
999         if (IS_FWI2_CAPABLE(ha)) {
1000                 sense_len = le32_to_cpu(sts24->sense_len);
1001                 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1002                 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1003                 fw_resid_len = le32_to_cpu(sts24->residual_len);
1004                 rsp_info = sts24->data;
1005                 sense_data = sts24->data;
1006                 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1007         } else {
1008                 sense_len = le16_to_cpu(sts->req_sense_length);
1009                 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1010                 resid_len = le32_to_cpu(sts->residual_length);
1011                 rsp_info = sts->rsp_info;
1012                 sense_data = sts->req_sense_data;
1013         }
1014
1015         /* Check for any FCP transport errors. */
1016         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1017                 /* Sense data lies beyond any FCP RESPONSE data. */
1018                 if (IS_FWI2_CAPABLE(ha))
1019                         sense_data += rsp_info_len;
1020                 if (rsp_info_len > 3 && rsp_info[3]) {
1021                         DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1022                             "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1023                             "retrying command\n", vha->host_no,
1024                             cp->device->channel, cp->device->id,
1025                             cp->device->lun, rsp_info_len, rsp_info[0],
1026                             rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1027                             rsp_info[5], rsp_info[6], rsp_info[7]));
1028
1029                         cp->result = DID_BUS_BUSY << 16;
1030                         qla2x00_sp_compl(ha, sp);
1031                         return;
1032                 }
1033         }
1034
1035         /* Check for overrun. */
1036         if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1037             scsi_status & SS_RESIDUAL_OVER)
1038                 comp_status = CS_DATA_OVERRUN;
1039
1040         /*
1041          * Based on the host and SCSI status, generate the Linux status code.
1042          */
1043         switch (comp_status) {
1044         case CS_COMPLETE:
1045         case CS_QUEUE_FULL:
1046                 if (scsi_status == 0) {
1047                         cp->result = DID_OK << 16;
1048                         break;
1049                 }
1050                 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1051                         resid = resid_len;
1052                         scsi_set_resid(cp, resid);
1053                         CMD_RESID_LEN(cp) = resid;
1054
1055                         if (!lscsi_status &&
1056                             ((unsigned)(scsi_bufflen(cp) - resid) <
1057                              cp->underflow)) {
1058                                 qla_printk(KERN_INFO, ha,
1059                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1060                                            "detected (%x of %x bytes)...returning "
1061                                            "error status.\n", vha->host_no,
1062                                            cp->device->channel, cp->device->id,
1063                                            cp->device->lun, resid,
1064                                            scsi_bufflen(cp));
1065
1066                                 cp->result = DID_ERROR << 16;
1067                                 break;
1068                         }
1069                 }
1070                 cp->result = DID_OK << 16 | lscsi_status;
1071
1072                 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1073                         DEBUG2(printk(KERN_INFO
1074                             "scsi(%ld): QUEUE FULL status detected "
1075                             "0x%x-0x%x.\n", vha->host_no, comp_status,
1076                             scsi_status));
1077
1078                         /* Adjust queue depth for all luns on the port. */
1079                         fcport->last_queue_full = jiffies;
1080                         starget_for_each_device(cp->device->sdev_target,
1081                             fcport, qla2x00_adjust_sdev_qdepth_down);
1082                         break;
1083                 }
1084                 if (lscsi_status != SS_CHECK_CONDITION)
1085                         break;
1086
1087                 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1088                 if (!(scsi_status & SS_SENSE_LEN_VALID))
1089                         break;
1090
1091                 qla2x00_handle_sense(sp, sense_data, sense_len);
1092                 break;
1093
1094         case CS_DATA_UNDERRUN:
1095                 resid = resid_len;
1096                 /* Use F/W calculated residual length. */
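                /*
                 * For FWI2-capable ISPs the firmware-computed residual is
                 * authoritative.  If the target did not flag an underrun, or
                 * its residual disagrees with the firmware's, the SCSI status
                 * is ignored (lscsi_status cleared) and the dropped-frame
                 * logic below decides how to complete the command.
                 */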
1097                 if (IS_FWI2_CAPABLE(ha)) {
1098                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1099                                 lscsi_status = 0;
1100                         } else if (resid != fw_resid_len) {
1101                                 scsi_status &= ~SS_RESIDUAL_UNDER;
1102                                 lscsi_status = 0;
1103                         }
1104                         resid = fw_resid_len;
1105                 }
1106
1107                 if (scsi_status & SS_RESIDUAL_UNDER) {
1108                         scsi_set_resid(cp, resid);
1109                         CMD_RESID_LEN(cp) = resid;
1110                 } else {
1111                         DEBUG2(printk(KERN_INFO
1112                             "scsi(%ld:%d:%d) UNDERRUN status detected "
1113                             "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1114                             "os_underflow=0x%x\n", vha->host_no,
1115                             cp->device->id, cp->device->lun, comp_status,
1116                             scsi_status, resid_len, resid, cp->cmnd[0],
1117                             cp->underflow));
1118
1119                 }
1120
1121                 /*
1122                  * Check to see if SCSI Status is non zero. If so report SCSI
1123                  * Status.
1124                  */
1125                 if (lscsi_status != 0) {
1126                         cp->result = DID_OK << 16 | lscsi_status;
1127
1128                         if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1129                                 DEBUG2(printk(KERN_INFO
1130                                     "scsi(%ld): QUEUE FULL status detected "
1131                                     "0x%x-0x%x.\n", vha->host_no, comp_status,
1132                                     scsi_status));
1133
1134                                 /*
1135                                  * Adjust queue depth for all luns on the
1136                                  * port.
1137                                  */
1138                                 fcport->last_queue_full = jiffies;
1139                                 starget_for_each_device(
1140                                     cp->device->sdev_target, fcport,
1141                                     qla2x00_adjust_sdev_qdepth_down);
1142                                 break;
1143                         }
1144                         if (lscsi_status != SS_CHECK_CONDITION)
1145                                 break;
1146
1147                         memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1148                         if (!(scsi_status & SS_SENSE_LEN_VALID))
1149                                 break;
1150
1151                         qla2x00_handle_sense(sp, sense_data, sense_len);
1152                 } else {
1153                         /*
1154                          * If RISC reports underrun and target does not report
1155                          * it then we must have a lost frame, so tell upper
1156                          * layer to retry it by reporting a bus busy.
1157                          */
1158                         if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1159                                 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1160                                               "frame(s) detected (%x of %x bytes)..."
1161                                               "retrying command.\n",
1162                                         vha->host_no, cp->device->channel,
1163                                         cp->device->id, cp->device->lun, resid,
1164                                         scsi_bufflen(cp)));
1165
1166                                 cp->result = DID_BUS_BUSY << 16;
1167                                 break;
1168                         }
1169
1170                         /* Handle mid-layer underflow */
1171                         if ((unsigned)(scsi_bufflen(cp) - resid) <
1172                             cp->underflow) {
1173                                 qla_printk(KERN_INFO, ha,
1174                                            "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1175                                            "detected (%x of %x bytes)...returning "
1176                                            "error status.\n", vha->host_no,
1177                                            cp->device->channel, cp->device->id,
1178                                            cp->device->lun, resid,
1179                                            scsi_bufflen(cp));
1180
1181                                 cp->result = DID_ERROR << 16;
1182                                 break;
1183                         }
1184
1185                         /* Everybody online, looking good... */
1186                         cp->result = DID_OK << 16;
1187                 }
1188                 break;
1189
1190         case CS_DATA_OVERRUN:
1191                 DEBUG2(printk(KERN_INFO
1192                     "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1193                     vha->host_no, cp->device->id, cp->device->lun, comp_status,
1194                     scsi_status));
1195                 DEBUG2(printk(KERN_INFO
1196                     "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1197                     cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1198                     cp->cmnd[4], cp->cmnd[5]));
1199                 DEBUG2(printk(KERN_INFO
1200                     "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1201                     "status!\n",
1202                     cp->serial_number, scsi_bufflen(cp), resid_len));
1203
1204                 cp->result = DID_ERROR << 16;
1205                 break;
1206
1207         case CS_PORT_LOGGED_OUT:
1208         case CS_PORT_CONFIG_CHG:
1209         case CS_PORT_BUSY:
1210         case CS_INCOMPLETE:
1211         case CS_PORT_UNAVAILABLE:
1212                 /*
1213                  * The port is logged out, busy, or otherwise unavailable, so
1214                  * the command could not be delivered; all of these states are
1215                  * handled the same way.
1216                  */
1217                 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1218                     "pid=%ld, compl status=0x%x, port state=0x%x\n",
1219                     vha->host_no, cp->device->id, cp->device->lun,
1220                     cp->serial_number, comp_status,
1221                     atomic_read(&fcport->state)));
1222
1223                 /*
1224                  * We are going to have the fc class block the rport
1225                  * while we try to recover so instruct the mid layer
1226                  * to requeue until the class decides how to handle this.
1227                  */
1228                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1229                 if (atomic_read(&fcport->state) == FCS_ONLINE)
1230                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1231                 break;
1232
1233         case CS_RESET:
1234                 DEBUG2(printk(KERN_INFO
1235                     "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1236                     vha->host_no, comp_status, scsi_status));
1237
1238                 cp->result = DID_RESET << 16;
1239                 break;
1240
1241         case CS_ABORTED:
1242                 /*
1243                  * hv2.19.12 - DID_ABORT does not retry the request.  If we
1244                  * aborted this request it completes as an abort; otherwise it
1245                  * must be due to a reset.
1246                  */
1247                 DEBUG2(printk(KERN_INFO
1248                     "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1249                     vha->host_no, comp_status, scsi_status));
1250
1251                 cp->result = DID_RESET << 16;
1252                 break;
1253
1254         case CS_TIMEOUT:
1255                 /*
1256                  * We are going to have the fc class block the rport
1257                  * while we try to recover so instruct the mid layer
1258                  * to requeue until the class decides how to handle this.
1259                  */
1260                 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1261
1262                 if (IS_FWI2_CAPABLE(ha)) {
1263                         DEBUG2(printk(KERN_INFO
1264                             "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1265                             "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1266                             cp->device->id, cp->device->lun, comp_status,
1267                             scsi_status));
1268                         break;
1269                 }
1270                 DEBUG2(printk(KERN_INFO
1271                     "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1272                     "sflags=%x.\n", vha->host_no, cp->device->channel,
1273                     cp->device->id, cp->device->lun, comp_status, scsi_status,
1274                     le16_to_cpu(sts->status_flags)));
1275
1276                 /* Check to see if logout occurred. */
1277                 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1278                         qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1279                 break;
1280
1281         default:
1282                 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1283                     "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1284                 qla_printk(KERN_INFO, ha,
1285                     "Unknown status detected 0x%x-0x%x.\n",
1286                     comp_status, scsi_status);
1287
1288                 cp->result = DID_ERROR << 16;
1289                 break;
1290         }
1291
1292         /* Place command on done queue. */
1293         if (vha->status_srb == NULL)
1294                 qla2x00_sp_compl(ha, sp);
1295 }
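/*
 * Illustrative sketch (not compiled): how the DID_* completion codes used
 * above are packed into scsi_cmnd->result.  The host byte lives in bits
 * 16-23 and the SCSI status byte in bits 0-7, which is why every DID_*
 * value is shifted left by 16.  The helper name below is hypothetical.
 */
#if 0
static inline void example_set_result(struct scsi_cmnd *cp, int host_byte,
    uint8_t scsi_status)
{
        /* Host byte (DID_*) in bits 16-23, SCSI status byte in bits 0-7. */
        cp->result = (host_byte << 16) | scsi_status;
}
#endif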
1296
1297 /**
1298  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1299  * @vha: SCSI driver HA context
1300  * @pkt: Entry pointer
1301  *
1302  * Extended sense data.
1303  */
1304 static void
1305 qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1306 {
1307         uint8_t         sense_sz = 0;
1308         struct qla_hw_data *ha = vha->hw;
1309         srb_t           *sp = vha->status_srb;
1310         struct scsi_cmnd *cp;
1311
1312         if (sp != NULL && sp->request_sense_length != 0) {
1313                 cp = sp->cmd;
1314                 if (cp == NULL) {
1315                         DEBUG2(printk("%s(): Cmd already returned to OS "
1316                             "sp=%p.\n", __func__, sp));
1317                         qla_printk(KERN_INFO, ha,
1318                             "cmd is NULL: already returned to OS (sp=%p)\n",
1319                             sp);
1320
1321                         vha->status_srb = NULL;
1322                         return;
1323                 }
1324
1325                 if (sp->request_sense_length > sizeof(pkt->data)) {
1326                         sense_sz = sizeof(pkt->data);
1327                 } else {
1328                         sense_sz = sp->request_sense_length;
1329                 }
1330
1331                 /* Move sense data. */
1332                 if (IS_FWI2_CAPABLE(ha))
1333                         host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1334                 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1335                 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1336
1337                 sp->request_sense_ptr += sense_sz;
1338                 sp->request_sense_length -= sense_sz;
1339
1340                 /* Place command on done queue. */
1341                 if (sp->request_sense_length == 0) {
1342                         vha->status_srb = NULL;
1343                         qla2x00_sp_compl(ha, sp);
1344                 }
1345         }
1346 }
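/*
 * Illustrative sketch (not compiled): the chunked sense-data copy
 * performed above.  Sense bytes that do not fit in the original status
 * entry arrive in Status Continuation packets; each copy is clamped to
 * sizeof(pkt->data) and the destination pointer and remaining length
 * advance until nothing is left.  Names below are hypothetical.
 */
#if 0
static void example_copy_sense_chunk(uint8_t **dst, uint32_t *remaining,
    const uint8_t *chunk, size_t chunk_size)
{
        size_t n = min_t(size_t, *remaining, chunk_size);

        memcpy(*dst, chunk, n);         /* copy at most one packet's worth */
        *dst += n;
        *remaining -= n;
}
#endif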
1347
1348 /**
1349  * qla2x00_error_entry() - Process an error entry.
1350  * @vha: SCSI driver HA context
1351  * @pkt: Entry pointer
1352  */
1353 static void
1354 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1355 {
1356         srb_t *sp;
1357         struct qla_hw_data *ha = vha->hw;
1358         struct req_que *req = rsp->req;
1359 #if defined(QL_DEBUG_LEVEL_2)
1360         if (pkt->entry_status & RF_INV_E_ORDER)
1361                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1362         else if (pkt->entry_status & RF_INV_E_COUNT)
1363                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1364         else if (pkt->entry_status & RF_INV_E_PARAM)
1365                 qla_printk(KERN_ERR, ha,
1366                     "%s: Invalid Entry Parameter\n", __func__);
1367         else if (pkt->entry_status & RF_INV_E_TYPE)
1368                 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1369         else if (pkt->entry_status & RF_BUSY)
1370                 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1371         else
1372                 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1373 #endif
1374
1375         /* Validate handle. */
1376         if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1377                 sp = req->outstanding_cmds[pkt->handle];
1378         else
1379                 sp = NULL;
1380
1381         if (sp) {
1382                 /* Free outstanding command slot. */
1383                 req->outstanding_cmds[pkt->handle] = NULL;
1384
1385                 /* Bad payload or header */
1386                 if (pkt->entry_status &
1387                     (RF_INV_E_ORDER | RF_INV_E_COUNT |
1388                      RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1389                         sp->cmd->result = DID_ERROR << 16;
1390                 } else if (pkt->entry_status & RF_BUSY) {
1391                         sp->cmd->result = DID_BUS_BUSY << 16;
1392                 } else {
1393                         sp->cmd->result = DID_ERROR << 16;
1394                 }
1395                 qla2x00_sp_compl(ha, sp);
1396
1397         } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1398             COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1399                 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1400                     vha->host_no));
1401                 qla_printk(KERN_WARNING, ha,
1402                     "Error entry - invalid handle\n");
1403
1404                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1405                 qla2xxx_wake_dpc(vha);
1406         }
1407 }
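/*
 * Illustrative sketch (not compiled): the handle validation used above.
 * A handle supplied by firmware is only trusted after a bounds check,
 * and then indexes the per-request-queue outstanding_cmds[] array.  The
 * helper name is hypothetical.
 */
#if 0
static srb_t *example_lookup_sp(struct req_que *req, uint32_t handle)
{
        if (handle >= MAX_OUTSTANDING_COMMANDS)
                return NULL;            /* reject out-of-range handles */
        return req->outstanding_cmds[handle];
}
#endif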
1408
1409 /**
1410  * qla24xx_mbx_completion() - Process mailbox command completions.
1411  * @vha: SCSI driver HA context
1412  * @mb0: Mailbox0 register
1413  */
1414 static void
1415 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1416 {
1417         uint16_t        cnt;
1418         uint16_t __iomem *wptr;
1419         struct qla_hw_data *ha = vha->hw;
1420         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1421
1422         /* Load return mailbox registers. */
1423         ha->flags.mbox_int = 1;
1424         ha->mailbox_out[0] = mb0;
1425         wptr = (uint16_t __iomem *)&reg->mailbox1;
1426
1427         for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1428                 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1429                 wptr++;
1430         }
1431
1432         if (ha->mcp) {
1433                 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1434                     __func__, vha->host_no, ha->mcp->mb[0]));
1435         } else {
1436                 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1437                     __func__, vha->host_no));
1438         }
1439 }
1440
1441 /**
1442  * qla24xx_process_response_queue() - Process response queue entries.
1443  * @rsp: response queue pointer
1444  */
1445 void
1446 qla24xx_process_response_queue(struct rsp_que *rsp)
1447 {
1448         struct qla_hw_data *ha = rsp->hw;
1449         device_reg_t __iomem *reg = ISP_QUE_REG(ha, rsp->id);
1450         struct sts_entry_24xx *pkt;
1451         struct scsi_qla_host *vha;
1452
1453         vha = qla2x00_get_rsp_host(rsp);
1454
1455         if (!vha->flags.online)
1456                 return;
1457
1458         while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1459                 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1460
1461                 rsp->ring_index++;
1462                 if (rsp->ring_index == rsp->length) {
1463                         rsp->ring_index = 0;
1464                         rsp->ring_ptr = rsp->ring;
1465                 } else {
1466                         rsp->ring_ptr++;
1467                 }
1468
1469                 if (pkt->entry_status != 0) {
1470                         DEBUG3(printk(KERN_INFO
1471                             "scsi(%ld): Process error entry.\n", vha->host_no));
1472
1473                         qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1474                         ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1475                         wmb();
1476                         continue;
1477                 }
1478
1479                 switch (pkt->entry_type) {
1480                 case STATUS_TYPE:
1481                         qla2x00_status_entry(vha, rsp, pkt);
1482                         break;
1483                 case STATUS_CONT_TYPE:
1484                         qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1485                         break;
1486                 case VP_RPT_ID_IOCB_TYPE:
1487                         qla24xx_report_id_acquisition(vha,
1488                             (struct vp_rpt_id_entry_24xx *)pkt);
1489                         break;
1490                 default:
1491                         /* Type Not Supported. */
1492                         DEBUG4(printk(KERN_WARNING
1493                             "scsi(%ld): Received unknown response pkt type %x "
1494                             "entry status=%x.\n",
1495                             vha->host_no, pkt->entry_type, pkt->entry_status));
1496                         break;
1497                 }
1498                 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1499                 wmb();
1500         }
1501
1502         /* Adjust ring index */
1503         if (ha->mqenable)
1504                 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, rsp->ring_index);
1505         else
1506                 WRT_REG_DWORD(&reg->isp24.rsp_q_out, rsp->ring_index);
1507 }
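/*
 * Illustrative sketch (not compiled): the response-ring consumer step
 * used above -- advance the index, wrap at the ring length, and leave
 * the processed-entry signature and out-pointer update to the caller.
 * The helper name is hypothetical.
 */
#if 0
static void example_advance_rsp_ring(struct rsp_que *rsp)
{
        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
                rsp->ring_index = 0;    /* wrap to the start of the ring */
                rsp->ring_ptr = rsp->ring;
        } else {
                rsp->ring_ptr++;
        }
}
#endif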
1508
1509 static void
1510 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1511 {
1512         int rval;
1513         uint32_t cnt;
1514         struct qla_hw_data *ha = vha->hw;
1515         struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1516
1517         if (!IS_QLA25XX(ha))
1518                 return;
1519
1520         rval = QLA_SUCCESS;
1521         WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1522         RD_REG_DWORD(&reg->iobase_addr);
1523         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1524         for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1525             rval == QLA_SUCCESS; cnt--) {
1526                 if (cnt) {
1527                         WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1528                         udelay(10);
1529                 } else
1530                         rval = QLA_FUNCTION_TIMEOUT;
1531         }
1532         if (rval == QLA_SUCCESS)
1533                 goto next_test;
1534
1535         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1536         for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1537             rval == QLA_SUCCESS; cnt--) {
1538                 if (cnt) {
1539                         WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1540                         udelay(10);
1541                 } else
1542                         rval = QLA_FUNCTION_TIMEOUT;
1543         }
1544         if (rval != QLA_SUCCESS)
1545                 goto done;
1546
1547 next_test:
1548         if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1549                 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1550
1551 done:
1552         WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1553         RD_REG_DWORD(&reg->iobase_window);
1554 }
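/*
 * Illustrative sketch (not compiled): the bounded register-poll pattern
 * used above -- test BIT_0 in the window register, re-arm it and delay
 * between attempts, and report QLA_FUNCTION_TIMEOUT once the retry
 * budget is exhausted.  The helper name is hypothetical.
 */
#if 0
static int example_poll_window_bit0(struct device_reg_24xx __iomem *reg,
    uint32_t window_val, uint32_t retries)
{
        while (retries--) {
                if (RD_REG_DWORD(&reg->iobase_window) & BIT_0)
                        return QLA_SUCCESS;
                WRT_REG_DWORD(&reg->iobase_window, window_val);
                udelay(10);
        }
        return QLA_FUNCTION_TIMEOUT;
}
#endif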
1555
1556 /**
1557  * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
1558  * @irq: interrupt number
1559  * @dev_id: response queue pointer
1560  *
1561  * Called by system whenever the host adapter generates an interrupt.
1562  *
1563  * Returns handled flag.
1564  */
1565 irqreturn_t
1566 qla24xx_intr_handler(int irq, void *dev_id)
1567 {
1568         scsi_qla_host_t *vha;
1569         struct qla_hw_data *ha;
1570         struct device_reg_24xx __iomem *reg;
1571         int             status;
1572         unsigned long   iter;
1573         uint32_t        stat;
1574         uint32_t        hccr;
1575         uint16_t        mb[4];
1576         struct rsp_que *rsp;
1577
1578         rsp = (struct rsp_que *) dev_id;
1579         if (!rsp) {
1580                 printk(KERN_INFO
1581                     "%s(): NULL response queue pointer\n", __func__);
1582                 return IRQ_NONE;
1583         }
1584
1585         ha = rsp->hw;
1586         reg = &ha->iobase->isp24;
1587         status = 0;
1588
1589         spin_lock(&ha->hardware_lock);
1590         vha = qla2x00_get_rsp_host(rsp);
1591         for (iter = 50; iter--; ) {
1592                 stat = RD_REG_DWORD(&reg->host_status);
1593                 if (stat & HSRX_RISC_PAUSED) {
1594                         if (pci_channel_offline(ha->pdev))
1595                                 break;
1596
1597                         if (ha->hw_event_pause_errors == 0)
1598                                 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1599                                     0, MSW(stat), LSW(stat));
1600                         else if (ha->hw_event_pause_errors < 0xffffffff)
1601                                 ha->hw_event_pause_errors++;
1602
1603                         hccr = RD_REG_DWORD(&reg->hccr);
1604
1605                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1606                             "Dumping firmware!\n", hccr);
1607
1608                         qla2xxx_check_risc_status(vha);
1609
1610                         ha->isp_ops->fw_dump(vha, 1);
1611                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1612                         break;
1613                 } else if ((stat & HSRX_RISC_INT) == 0)
1614                         break;
1615
1616                 switch (stat & 0xff) {
1617                 case 0x1:
1618                 case 0x2:
1619                 case 0x10:
1620                 case 0x11:
1621                         qla24xx_mbx_completion(vha, MSW(stat));
1622                         status |= MBX_INTERRUPT;
1623
1624                         break;
1625                 case 0x12:
1626                         mb[0] = MSW(stat);
1627                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1628                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1629                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1630                         qla2x00_async_event(vha, rsp, mb);
1631                         break;
1632                 case 0x13:
1633                 case 0x14:
1634                         qla24xx_process_response_queue(rsp);
1635                         break;
1636                 default:
1637                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1638                             "(%d).\n",
1639                             vha->host_no, stat & 0xff));
1640                         break;
1641                 }
1642                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1643                 RD_REG_DWORD_RELAXED(&reg->hccr);
1644         }
1645         spin_unlock(&ha->hardware_lock);
1646
1647         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1648             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1649                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1650                 complete(&ha->mbx_intr_comp);
1651         }
1652
1653         return IRQ_HANDLED;
1654 }
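/*
 * Illustrative sketch (not compiled): how the host_status word read in
 * the handler above is decoded.  The low byte selects the interrupt
 * source; for mailbox-completion and asynchronous-event interrupts the
 * upper sixteen bits carry mailbox register 0.
 */
#if 0
static void example_decode_host_status(uint32_t stat)
{
        uint8_t type = stat & 0xff;     /* interrupt source */
        uint16_t mb0 = MSW(stat);       /* mailbox 0 for the cases below */

        switch (type) {
        case 0x1: case 0x2: case 0x10: case 0x11:
                /* mailbox command completion; status in mb0 */
                break;
        case 0x12:
                /* asynchronous event; event code in mb0 */
                break;
        case 0x13: case 0x14:
                /* response queue update; mb0 unused */
                break;
        }
        (void)mb0;
}
#endif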
1655
1656 static irqreturn_t
1657 qla24xx_msix_rsp_q(int irq, void *dev_id)
1658 {
1659         struct qla_hw_data *ha;
1660         struct rsp_que *rsp;
1661         struct device_reg_24xx __iomem *reg;
1662
1663         rsp = (struct rsp_que *) dev_id;
1664         if (!rsp) {
1665                 printk(KERN_INFO
1666                 "%s(): NULL response queue pointer\n", __func__);
1667                 return IRQ_NONE;
1668         }
1669         ha = rsp->hw;
1670         reg = &ha->iobase->isp24;
1671
1672         spin_lock_irq(&ha->hardware_lock);
1673
1674         qla24xx_process_response_queue(rsp);
1675         WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1676
1677         spin_unlock_irq(&ha->hardware_lock);
1678
1679         return IRQ_HANDLED;
1680 }
1681
1682 static irqreturn_t
1683 qla25xx_msix_rsp_q(int irq, void *dev_id)
1684 {
1685         struct qla_hw_data *ha;
1686         struct rsp_que *rsp;
1687         struct device_reg_24xx __iomem *reg;
1688         uint16_t msix_disabled_hccr = 0;
1689
1690         rsp = (struct rsp_que *) dev_id;
1691         if (!rsp) {
1692                 printk(KERN_INFO
1693                         "%s(): NULL response queue pointer\n", __func__);
1694                 return IRQ_NONE;
1695         }
1696         ha = rsp->hw;
1697         reg = &ha->iobase->isp24;
1698
1699         spin_lock_irq(&ha->hardware_lock);
1700
1701         msix_disabled_hccr = rsp->options;
1702         if (!rsp->id)
1703                 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1704         else
1705                 msix_disabled_hccr &= BIT_6;
1706
1707         qla24xx_process_response_queue(rsp);
1708
1709         if (!msix_disabled_hccr)
1710                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1711
1712         spin_unlock_irq(&ha->hardware_lock);
1713
1714         return IRQ_HANDLED;
1715 }
1716
1717 static irqreturn_t
1718 qla24xx_msix_default(int irq, void *dev_id)
1719 {
1720         scsi_qla_host_t *vha;
1721         struct qla_hw_data *ha;
1722         struct rsp_que *rsp;
1723         struct device_reg_24xx __iomem *reg;
1724         int             status;
1725         uint32_t        stat;
1726         uint32_t        hccr;
1727         uint16_t        mb[4];
1728
1729         rsp = (struct rsp_que *) dev_id;
1730         if (!rsp) {
1731                 DEBUG(printk(
1732                 "%s(): NULL response queue pointer\n", __func__));
1733                 return IRQ_NONE;
1734         }
1735         ha = rsp->hw;
1736         reg = &ha->iobase->isp24;
1737         status = 0;
1738
1739         spin_lock_irq(&ha->hardware_lock);
1740         vha = qla2x00_get_rsp_host(rsp);
1741         do {
1742                 stat = RD_REG_DWORD(&reg->host_status);
1743                 if (stat & HSRX_RISC_PAUSED) {
1744                         if (pci_channel_offline(ha->pdev))
1745                                 break;
1746
1747                         if (ha->hw_event_pause_errors == 0)
1748                                 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1749                                     0, MSW(stat), LSW(stat));
1750                         else if (ha->hw_event_pause_errors < 0xffffffff)
1751                                 ha->hw_event_pause_errors++;
1752
1753                         hccr = RD_REG_DWORD(&reg->hccr);
1754
1755                         qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1756                             "Dumping firmware!\n", hccr);
1757
1758                         qla2xxx_check_risc_status(vha);
1759
1760                         ha->isp_ops->fw_dump(vha, 1);
1761                         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1762                         break;
1763                 } else if ((stat & HSRX_RISC_INT) == 0)
1764                         break;
1765
1766                 switch (stat & 0xff) {
1767                 case 0x1:
1768                 case 0x2:
1769                 case 0x10:
1770                 case 0x11:
1771                         qla24xx_mbx_completion(vha, MSW(stat));
1772                         status |= MBX_INTERRUPT;
1773
1774                         break;
1775                 case 0x12:
1776                         mb[0] = MSW(stat);
1777                         mb[1] = RD_REG_WORD(&reg->mailbox1);
1778                         mb[2] = RD_REG_WORD(&reg->mailbox2);
1779                         mb[3] = RD_REG_WORD(&reg->mailbox3);
1780                         qla2x00_async_event(vha, rsp, mb);
1781                         break;
1782                 case 0x13:
1783                 case 0x14:
1784                         qla24xx_process_response_queue(rsp);
1785                         break;
1786                 default:
1787                         DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1788                             "(%d).\n",
1789                             vha->host_no, stat & 0xff));
1790                         break;
1791                 }
1792                 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1793         } while (0);
1794         spin_unlock_irq(&ha->hardware_lock);
1795
1796         if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1797             (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1798                 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1799                 complete(&ha->mbx_intr_comp);
1800         }
1801
1802         return IRQ_HANDLED;
1803 }
1804
1805 /* Interrupt handling helpers. */
1806
1807 struct qla_init_msix_entry {
1808         uint16_t entry;
1809         uint16_t index;
1810         const char *name;
1811         irq_handler_t handler;
1812 };
1813
1814 static struct qla_init_msix_entry base_queue = {
1815         .entry = 0,
1816         .index = 0,
1817         .name = "qla2xxx (default)",
1818         .handler = qla24xx_msix_default,
1819 };
1820
1821 static struct qla_init_msix_entry base_rsp_queue = {
1822         .entry = 1,
1823         .index = 1,
1824         .name = "qla2xxx (rsp_q)",
1825         .handler = qla24xx_msix_rsp_q,
1826 };
1827
1828 static struct qla_init_msix_entry multi_rsp_queue = {
1829         .entry = 1,
1830         .index = 1,
1831         .name = "qla2xxx (multi_q)",
1832         .handler = qla25xx_msix_rsp_q,
1833 };
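/*
 * Illustrative sketch (not compiled): the qla_init_msix_entry tables
 * above simply pair an MSI-X vector slot with a name and a handler.  A
 * hypothetical additional response queue would be described the same
 * way; the entry, name and handler below are invented for illustration.
 */
#if 0
static irqreturn_t example_msix_handler(int irq, void *dev_id);

static struct qla_init_msix_entry example_queue = {
        .entry = 2,
        .index = 2,
        .name = "qla2xxx (example_q)",
        .handler = example_msix_handler,
};
#endif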
1834
1835 static void
1836 qla24xx_disable_msix(struct qla_hw_data *ha)
1837 {
1838         int i;
1839         struct qla_msix_entry *qentry;
1840
1841         for (i = 0; i < ha->msix_count; i++) {
1842                 qentry = &ha->msix_entries[i];
1843                 if (qentry->have_irq)
1844                         free_irq(qentry->vector, qentry->rsp);
1845         }
1846         pci_disable_msix(ha->pdev);
1847         kfree(ha->msix_entries);
1848         ha->msix_entries = NULL;
1849         ha->flags.msix_enabled = 0;
1850 }
1851
1852 static int
1853 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1854 {
1855         int i, ret;
1856         struct msix_entry *entries;
1857         struct qla_msix_entry *qentry;
1858         struct qla_init_msix_entry *msix_queue;
1859
1860         entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1861                                         GFP_KERNEL);
1862         if (!entries)
1863                 return -ENOMEM;
1864
1865         for (i = 0; i < ha->msix_count; i++)
1866                 entries[i].entry = i;
1867
1868         ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1869         if (ret) {
1870                 qla_printk(KERN_WARNING, ha,
1871                         "MSI-X: Failed to enable support -- %d/%d\n"
1872                         " Retry with %d vectors\n", ha->msix_count, ret, ret);
1873                 ha->msix_count = ret;
1874                 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1875                 if (ret) {
1876                         qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1877                                 " support, giving up -- %d/%d\n",
1878                                 ha->msix_count, ret);
1879                         goto msix_out;
1880                 }
1881                 ha->max_queues = ha->msix_count - 1;
1882         }
1883         ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1884                                 ha->msix_count, GFP_KERNEL);
1885         if (!ha->msix_entries) {
1886                 ret = -ENOMEM;
1887                 goto msix_out;
1888         }
1889         ha->flags.msix_enabled = 1;
1890
1891         for (i = 0; i < ha->msix_count; i++) {
1892                 qentry = &ha->msix_entries[i];
1893                 qentry->vector = entries[i].vector;
1894                 qentry->entry = entries[i].entry;
1895                 qentry->have_irq = 0;
1896                 qentry->rsp = NULL;
1897         }
1898
1899         /* Enable MSI-X for AENs for queue 0 */
1900         qentry = &ha->msix_entries[0];
1901         ret = request_irq(qentry->vector, base_queue.handler, 0,
1902                                         base_queue.name, rsp);
1903         if (ret) {
1904                 qla_printk(KERN_WARNING, ha,
1905                         "MSI-X: Unable to register handler -- %x/%d.\n",
1906                         qentry->vector, ret);
1907                 qla24xx_disable_msix(ha);
1908                 goto msix_out;
1909         }
1910         qentry->have_irq = 1;
1911         qentry->rsp = rsp;
1912
1913         /* Enable MSI-X vector for response queue update for queue 0 */
1914         if (ha->max_queues > 1 && ha->mqiobase) {
1915                 ha->mqenable = 1;
1916                 msix_queue = &multi_rsp_queue;
1917                 qla_printk(KERN_INFO, ha,
1918                                 "MQ enabled, Number of Queue Resources: %d\n",
1919                                 ha->max_queues);
1920         } else {
1921                 ha->mqenable = 0;
1922                 msix_queue = &base_rsp_queue;
1923         }
1924
1925         qentry = &ha->msix_entries[1];
1926         ret = request_irq(qentry->vector, msix_queue->handler, 0,
1927                                                 msix_queue->name, rsp);
1928         if (ret) {
1929                 qla_printk(KERN_WARNING, ha,
1930                         "MSI-X: Unable to register handler -- %x/%d.\n",
1931                         qentry->vector, ret);
1932                 qla24xx_disable_msix(ha);
1933                 ha->mqenable = 0;
1934                 goto msix_out;
1935         }
1936         qentry->have_irq = 1;
1937         qentry->rsp = rsp;
1938
1939 msix_out:
1940         kfree(entries);
1941         return ret;
1942 }
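/*
 * Illustrative sketch (not compiled): the MSI-X vector negotiation used
 * above.  With this kernel's API, pci_enable_msix() returns 0 on
 * success, a negative errno on hard failure, or a positive count of
 * vectors that could be allocated, in which case the driver retries
 * once with the smaller count.  The helper name is hypothetical.
 */
#if 0
static int example_negotiate_msix(struct pci_dev *pdev,
    struct msix_entry *entries, int want)
{
        int ret = pci_enable_msix(pdev, entries, want);

        if (ret > 0)                    /* fewer vectors available: retry */
                ret = pci_enable_msix(pdev, entries, ret);
        return ret;                     /* 0 on success, < 0 on failure */
}
#endif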
1943
1944 int
1945 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1946 {
1947         int ret;
1948         device_reg_t __iomem *reg = ha->iobase;
1949
1950         /* If possible, enable MSI-X. */
1951         if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1952                 goto skip_msix;
1953
1954         if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1955                 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1956                 DEBUG2(qla_printk(KERN_WARNING, ha,
1957                 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1958                         ha->pdev->revision, ha->fw_attributes));
1959
1960                 goto skip_msix;
1961         }
1962
1963         if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1964             (ha->pdev->subsystem_device == 0x7040 ||
1965                 ha->pdev->subsystem_device == 0x7041 ||
1966                 ha->pdev->subsystem_device == 0x1705)) {
1967                 DEBUG2(qla_printk(KERN_WARNING, ha,
1968                     "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1969                     ha->pdev->subsystem_vendor,
1970                     ha->pdev->subsystem_device));
1971
1972                 goto skip_msi;
1973         }
1974
1975         ret = qla24xx_enable_msix(ha, rsp);
1976         if (!ret) {
1977                 DEBUG2(qla_printk(KERN_INFO, ha,
1978                     "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1979                     ha->fw_attributes));
1980                 goto clear_risc_ints;
1981         }
1982         qla_printk(KERN_WARNING, ha,
1983             "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1984 skip_msix:
1985
1986         if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1987                 goto skip_msi;
1988
1989         ret = pci_enable_msi(ha->pdev);
1990         if (!ret) {
1991                 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1992                 ha->flags.msi_enabled = 1;
1993         }
1994 skip_msi:
1995
1996         ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1997             IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
1998         if (ret) {
1999                 qla_printk(KERN_WARNING, ha,
2000                     "Failed to reserve interrupt %d: already in use.\n",
2001                     ha->pdev->irq);
2002                 goto fail;
2003         }
2004         ha->flags.inta_enabled = 1;
2005 clear_risc_ints:
2006
2007         spin_lock_irq(&ha->hardware_lock);
2008         if (IS_FWI2_CAPABLE(ha)) {
2009                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2010                 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2011         } else {
2012                 WRT_REG_WORD(&reg->isp.semaphore, 0);
2013                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2014                 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
2015         }
2016         spin_unlock_irq(&ha->hardware_lock);
2017
2018 fail:
2019         return ret;
2020 }
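/*
 * Illustrative sketch (not compiled): the interrupt-setup fallback order
 * implemented above -- try MSI-X first (which registers its own
 * vectors), fall back to MSI, and finally register a legacy INTx/MSI
 * line with request_irq().  The helper name is hypothetical and the
 * chip-model checks of the real routine are omitted.
 */
#if 0
static int example_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
        if (qla24xx_enable_msix(ha, rsp) == 0)
                return 0;                       /* MSI-X vectors in place */

        if (pci_enable_msi(ha->pdev) == 0)
                ha->flags.msi_enabled = 1;      /* MSI; still uses pdev->irq */

        return request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
            IRQF_DISABLED | IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
}
#endif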
2021
2022 void
2023 qla2x00_free_irqs(scsi_qla_host_t *vha)
2024 {
2025         struct qla_hw_data *ha = vha->hw;
2026         struct rsp_que *rsp = ha->rsp_q_map[0];
2027
2028         if (ha->flags.msix_enabled)
2029                 qla24xx_disable_msix(ha);
2030         else if (ha->flags.inta_enabled) {
2031                 free_irq(ha->pdev->irq, rsp);
2032                 pci_disable_msi(ha->pdev);
2033         }
2034 }
2035
2036 static struct scsi_qla_host *
2037 qla2x00_get_rsp_host(struct rsp_que *rsp)
2038 {
2039         srb_t *sp;
2040         struct qla_hw_data *ha = rsp->hw;
2041         struct scsi_qla_host *vha = NULL;
2042         struct sts_entry_24xx *pkt;
2043         struct req_que *req;
2044
2045         if (rsp->id) {
2046                 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2047                 req = rsp->req;
2048                 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2049                         sp = req->outstanding_cmds[pkt->handle];
2050                         if (sp)
2051                                 vha = sp->vha;
2052                 }
2053         }
2054         /* Handle it in the base queue. */
2055         if (!vha)
2056                 vha = pci_get_drvdata(ha->pdev);
2057
2058         return vha;
2059 }
2060
2061 int qla25xx_request_irq(struct rsp_que *rsp)
2062 {
2063         struct qla_hw_data *ha = rsp->hw;
2064         struct qla_init_msix_entry *intr = &multi_rsp_queue;
2065         struct qla_msix_entry *msix = rsp->msix;
2066         int ret;
2067
2068         ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2069         if (ret) {
2070                 qla_printk(KERN_WARNING, ha,
2071                         "MSI-X: Unable to register handler -- %x/%d.\n",
2072                         msix->vector, ret);
2073                 return ret;
2074         }
2075         msix->have_irq = 1;
2076         msix->rsp = rsp;
2077         return ret;
2078 }
2079