]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/scsi/qla2xxx/qla_isr.c
[SCSI] qla2xxx: changes in multiq code
[net-next-2.6.git] / drivers / scsi / qla2xxx / qla_isr.c
CommitLineData
1da177e4 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
01e58d8e 3 * Copyright (c) 2003-2008 QLogic Corporation
1da177e4 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
1da177e4
LT
6 */
7#include "qla_def.h"
8
05236a05 9#include <linux/delay.h>
df7baa50
AV
10#include <scsi/scsi_tcq.h>
11
1da177e4 12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
73208dfd
AC
13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
1da177e4 16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
73208dfd
AC
17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *);
e315cd28 19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
9a853f71 20
1da177e4
LT
21/**
22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
23 * @irq:
24 * @dev_id: SCSI driver HA context
1da177e4
LT
25 *
26 * Called by system whenever the host adapter generates an interrupt.
27 *
28 * Returns handled flag.
29 */
30irqreturn_t
7d12e780 31qla2100_intr_handler(int irq, void *dev_id)
1da177e4 32{
e315cd28
AC
33 scsi_qla_host_t *vha;
34 struct qla_hw_data *ha;
3d71644c 35 struct device_reg_2xxx __iomem *reg;
1da177e4 36 int status;
1da177e4 37 unsigned long iter;
14e660e6 38 uint16_t hccr;
9a853f71 39 uint16_t mb[4];
e315cd28 40 struct rsp_que *rsp;
1da177e4 41
e315cd28
AC
42 rsp = (struct rsp_que *) dev_id;
43 if (!rsp) {
1da177e4 44 printk(KERN_INFO
e315cd28 45 "%s(): NULL response queue pointer\n", __func__);
1da177e4
LT
46 return (IRQ_NONE);
47 }
48
e315cd28 49 ha = rsp->hw;
3d71644c 50 reg = &ha->iobase->isp;
1da177e4
LT
51 status = 0;
52
c6952483 53 spin_lock(&ha->hardware_lock);
e315cd28 54 vha = qla2x00_get_rsp_host(rsp);
1da177e4 55 for (iter = 50; iter--; ) {
14e660e6
SJ
56 hccr = RD_REG_WORD(&reg->hccr);
57 if (hccr & HCCR_RISC_PAUSE) {
58 if (pci_channel_offline(ha->pdev))
59 break;
60
61 /*
62 * Issue a "HARD" reset in order for the RISC interrupt
63 * bit to be cleared. Schedule a big hammmer to get
64 * out of the RISC PAUSED state.
65 */
66 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
67 RD_REG_WORD(&reg->hccr);
68
e315cd28
AC
69 ha->isp_ops->fw_dump(vha, 1);
70 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
14e660e6
SJ
71 break;
72 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
1da177e4
LT
73 break;
74
75 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
76 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
77 RD_REG_WORD(&reg->hccr);
78
79 /* Get mailbox data. */
9a853f71
AV
80 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
81 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
e315cd28 82 qla2x00_mbx_completion(vha, mb[0]);
1da177e4 83 status |= MBX_INTERRUPT;
9a853f71
AV
84 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
85 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
86 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
87 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 88 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
89 } else {
90 /*EMPTY*/
91 DEBUG2(printk("scsi(%ld): Unrecognized "
9a853f71 92 "interrupt type (%d).\n",
e315cd28 93 vha->host_no, mb[0]));
1da177e4
LT
94 }
95 /* Release mailbox registers. */
96 WRT_REG_WORD(&reg->semaphore, 0);
97 RD_REG_WORD(&reg->semaphore);
98 } else {
73208dfd 99 qla2x00_process_response_queue(rsp);
1da177e4
LT
100
101 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
102 RD_REG_WORD(&reg->hccr);
103 }
104 }
c6952483 105 spin_unlock(&ha->hardware_lock);
1da177e4 106
1da177e4
LT
107 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
108 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 109 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 110 complete(&ha->mbx_intr_comp);
1da177e4
LT
111 }
112
1da177e4
LT
113 return (IRQ_HANDLED);
114}
115
116/**
117 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
118 * @irq:
119 * @dev_id: SCSI driver HA context
1da177e4
LT
120 *
121 * Called by system whenever the host adapter generates an interrupt.
122 *
123 * Returns handled flag.
124 */
125irqreturn_t
7d12e780 126qla2300_intr_handler(int irq, void *dev_id)
1da177e4 127{
e315cd28 128 scsi_qla_host_t *vha;
3d71644c 129 struct device_reg_2xxx __iomem *reg;
1da177e4 130 int status;
1da177e4
LT
131 unsigned long iter;
132 uint32_t stat;
1da177e4 133 uint16_t hccr;
9a853f71 134 uint16_t mb[4];
e315cd28
AC
135 struct rsp_que *rsp;
136 struct qla_hw_data *ha;
1da177e4 137
e315cd28
AC
138 rsp = (struct rsp_que *) dev_id;
139 if (!rsp) {
1da177e4 140 printk(KERN_INFO
e315cd28 141 "%s(): NULL response queue pointer\n", __func__);
1da177e4
LT
142 return (IRQ_NONE);
143 }
144
e315cd28 145 ha = rsp->hw;
3d71644c 146 reg = &ha->iobase->isp;
1da177e4
LT
147 status = 0;
148
c6952483 149 spin_lock(&ha->hardware_lock);
e315cd28 150 vha = qla2x00_get_rsp_host(rsp);
1da177e4
LT
151 for (iter = 50; iter--; ) {
152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
153 if (stat & HSR_RISC_PAUSED) {
14e660e6
SJ
154 if (pci_channel_offline(ha->pdev))
155 break;
156
1da177e4
LT
157 hccr = RD_REG_WORD(&reg->hccr);
158 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
07f31805
AV
159 qla_printk(KERN_INFO, ha, "Parity error -- "
160 "HCCR=%x, Dumping firmware!\n", hccr);
1da177e4 161 else
07f31805
AV
162 qla_printk(KERN_INFO, ha, "RISC paused -- "
163 "HCCR=%x, Dumping firmware!\n", hccr);
1da177e4
LT
164
165 /*
166 * Issue a "HARD" reset in order for the RISC
167 * interrupt bit to be cleared. Schedule a big
168 * hammmer to get out of the RISC PAUSED state.
169 */
170 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171 RD_REG_WORD(&reg->hccr);
07f31805 172
e315cd28
AC
173 ha->isp_ops->fw_dump(vha, 1);
174 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
175 break;
176 } else if ((stat & HSR_RISC_INT) == 0)
177 break;
178
1da177e4 179 switch (stat & 0xff) {
1da177e4
LT
180 case 0x1:
181 case 0x2:
182 case 0x10:
183 case 0x11:
e315cd28 184 qla2x00_mbx_completion(vha, MSW(stat));
1da177e4
LT
185 status |= MBX_INTERRUPT;
186
187 /* Release mailbox registers. */
188 WRT_REG_WORD(&reg->semaphore, 0);
189 break;
190 case 0x12:
9a853f71
AV
191 mb[0] = MSW(stat);
192 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
73208dfd 195 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
196 break;
197 case 0x13:
73208dfd 198 qla2x00_process_response_queue(rsp);
1da177e4
LT
199 break;
200 case 0x15:
9a853f71
AV
201 mb[0] = MBA_CMPLT_1_16BIT;
202 mb[1] = MSW(stat);
73208dfd 203 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
204 break;
205 case 0x16:
9a853f71
AV
206 mb[0] = MBA_SCSI_COMPLETION;
207 mb[1] = MSW(stat);
208 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
73208dfd 209 qla2x00_async_event(vha, rsp, mb);
1da177e4
LT
210 break;
211 default:
212 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
9a853f71 213 "(%d).\n",
e315cd28 214 vha->host_no, stat & 0xff));
1da177e4
LT
215 break;
216 }
217 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
218 RD_REG_WORD_RELAXED(&reg->hccr);
219 }
c6952483 220 spin_unlock(&ha->hardware_lock);
1da177e4 221
1da177e4
LT
222 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
223 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1da177e4 224 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 225 complete(&ha->mbx_intr_comp);
1da177e4
LT
226 }
227
1da177e4
LT
228 return (IRQ_HANDLED);
229}
230
231/**
232 * qla2x00_mbx_completion() - Process mailbox command completions.
233 * @ha: SCSI driver HA context
234 * @mb0: Mailbox0 register
235 */
236static void
e315cd28 237qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1da177e4
LT
238{
239 uint16_t cnt;
240 uint16_t __iomem *wptr;
e315cd28 241 struct qla_hw_data *ha = vha->hw;
3d71644c 242 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
243
244 /* Load return mailbox registers. */
245 ha->flags.mbox_int = 1;
246 ha->mailbox_out[0] = mb0;
247 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
248
249 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
fa2a1ce5 250 if (IS_QLA2200(ha) && cnt == 8)
1da177e4
LT
251 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
252 if (cnt == 4 || cnt == 5)
253 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
254 else
255 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
fa2a1ce5 256
1da177e4
LT
257 wptr++;
258 }
259
260 if (ha->mcp) {
261 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
e315cd28 262 __func__, vha->host_no, ha->mcp->mb[0]));
1da177e4
LT
263 } else {
264 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
e315cd28 265 __func__, vha->host_no));
1da177e4
LT
266 }
267}
268
269/**
270 * qla2x00_async_event() - Process aynchronous events.
271 * @ha: SCSI driver HA context
9a853f71 272 * @mb: Mailbox registers (0 - 3)
1da177e4 273 */
2c3dfe3f 274void
73208dfd 275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1da177e4 276{
9a853f71 277#define LS_UNKNOWN 2
c3a2f0df 278 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
1da177e4 279 char *link_speed;
1da177e4
LT
280 uint16_t handle_cnt;
281 uint16_t cnt;
282 uint32_t handles[5];
e315cd28 283 struct qla_hw_data *ha = vha->hw;
3d71644c 284 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
285 uint32_t rscn_entry, host_pid;
286 uint8_t rscn_queue_index;
4d4df193 287 unsigned long flags;
1da177e4
LT
288
289 /* Setup to process RIO completion. */
290 handle_cnt = 0;
1da177e4
LT
291 switch (mb[0]) {
292 case MBA_SCSI_COMPLETION:
9a853f71 293 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
294 handle_cnt = 1;
295 break;
296 case MBA_CMPLT_1_16BIT:
9a853f71 297 handles[0] = mb[1];
1da177e4
LT
298 handle_cnt = 1;
299 mb[0] = MBA_SCSI_COMPLETION;
300 break;
301 case MBA_CMPLT_2_16BIT:
9a853f71
AV
302 handles[0] = mb[1];
303 handles[1] = mb[2];
1da177e4
LT
304 handle_cnt = 2;
305 mb[0] = MBA_SCSI_COMPLETION;
306 break;
307 case MBA_CMPLT_3_16BIT:
9a853f71
AV
308 handles[0] = mb[1];
309 handles[1] = mb[2];
310 handles[2] = mb[3];
1da177e4
LT
311 handle_cnt = 3;
312 mb[0] = MBA_SCSI_COMPLETION;
313 break;
314 case MBA_CMPLT_4_16BIT:
9a853f71
AV
315 handles[0] = mb[1];
316 handles[1] = mb[2];
317 handles[2] = mb[3];
1da177e4
LT
318 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
319 handle_cnt = 4;
320 mb[0] = MBA_SCSI_COMPLETION;
321 break;
322 case MBA_CMPLT_5_16BIT:
9a853f71
AV
323 handles[0] = mb[1];
324 handles[1] = mb[2];
325 handles[2] = mb[3];
1da177e4
LT
326 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
327 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
328 handle_cnt = 5;
329 mb[0] = MBA_SCSI_COMPLETION;
330 break;
331 case MBA_CMPLT_2_32BIT:
9a853f71 332 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
1da177e4
LT
333 handles[1] = le32_to_cpu(
334 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
335 RD_MAILBOX_REG(ha, reg, 6));
336 handle_cnt = 2;
337 mb[0] = MBA_SCSI_COMPLETION;
338 break;
339 default:
340 break;
341 }
342
343 switch (mb[0]) {
344 case MBA_SCSI_COMPLETION: /* Fast Post */
e315cd28 345 if (!vha->flags.online)
1da177e4
LT
346 break;
347
348 for (cnt = 0; cnt < handle_cnt; cnt++)
73208dfd
AC
349 qla2x00_process_completed_request(vha, rsp->req,
350 handles[cnt]);
1da177e4
LT
351 break;
352
353 case MBA_RESET: /* Reset */
e315cd28
AC
354 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
355 vha->host_no));
1da177e4 356
e315cd28 357 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
358 break;
359
360 case MBA_SYSTEM_ERR: /* System Error */
1da177e4
LT
361 qla_printk(KERN_INFO, ha,
362 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
363 mb[1], mb[2], mb[3]);
364
e315cd28
AC
365 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
366 ha->isp_ops->fw_dump(vha, 1);
1da177e4 367
e428924c 368 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
369 if (mb[1] == 0 && mb[2] == 0) {
370 qla_printk(KERN_ERR, ha,
371 "Unrecoverable Hardware Error: adapter "
372 "marked OFFLINE!\n");
e315cd28 373 vha->flags.online = 0;
9a853f71 374 } else
e315cd28 375 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71 376 } else if (mb[1] == 0) {
1da177e4
LT
377 qla_printk(KERN_INFO, ha,
378 "Unrecoverable Hardware Error: adapter marked "
379 "OFFLINE!\n");
e315cd28 380 vha->flags.online = 0;
1da177e4 381 } else
e315cd28 382 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
383 break;
384
385 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
386 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
e315cd28 387 vha->host_no));
1da177e4
LT
388 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
389
e315cd28
AC
390 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
392 break;
393
394 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
395 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
e315cd28 396 vha->host_no));
1da177e4
LT
397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
398
e315cd28
AC
399 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
400 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
401 break;
402
403 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
404 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
e315cd28 405 vha->host_no));
1da177e4
LT
406 break;
407
408 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
e315cd28 409 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
1da177e4 410 mb[1]));
cc3ef7bc 411 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
1da177e4 412
e315cd28
AC
413 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
414 atomic_set(&vha->loop_state, LOOP_DOWN);
415 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
416 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
417 }
418
e315cd28
AC
419 if (vha->vp_idx) {
420 atomic_set(&vha->vp_state, VP_FAILED);
421 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
422 }
423
e315cd28
AC
424 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
425 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1da177e4 426
e315cd28
AC
427 vha->flags.management_server_logged_in = 0;
428 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1da177e4
LT
429 break;
430
431 case MBA_LOOP_UP: /* Loop Up Event */
1da177e4
LT
432 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
433 link_speed = link_speeds[0];
d8b45213 434 ha->link_data_rate = PORT_SPEED_1GB;
1da177e4 435 } else {
9a853f71 436 link_speed = link_speeds[LS_UNKNOWN];
1da177e4
LT
437 if (mb[1] < 5)
438 link_speed = link_speeds[mb[1]];
439 ha->link_data_rate = mb[1];
440 }
441
442 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
e315cd28 443 vha->host_no, link_speed));
1da177e4
LT
444 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
445 link_speed);
446
e315cd28
AC
447 vha->flags.management_server_logged_in = 0;
448 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1da177e4
LT
449 break;
450
451 case MBA_LOOP_DOWN: /* Loop Down Event */
4d4df193 452 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
e315cd28 453 "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
4d4df193
HK
454 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
455 mb[1], mb[2], mb[3]);
1da177e4 456
e315cd28
AC
457 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
458 atomic_set(&vha->loop_state, LOOP_DOWN);
459 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
460 vha->device_flags |= DFLG_NO_CABLE;
461 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
462 }
463
e315cd28
AC
464 if (vha->vp_idx) {
465 atomic_set(&vha->vp_state, VP_FAILED);
466 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
467 }
468
e315cd28 469 vha->flags.management_server_logged_in = 0;
d8b45213 470 ha->link_data_rate = PORT_SPEED_UNKNOWN;
e315cd28 471 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1da177e4
LT
472 break;
473
474 case MBA_LIP_RESET: /* LIP reset occurred */
1da177e4 475 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
e315cd28 476 vha->host_no, mb[1]));
1da177e4 477 qla_printk(KERN_INFO, ha,
cc3ef7bc 478 "LIP reset occurred (%x).\n", mb[1]);
1da177e4 479
e315cd28
AC
480 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
481 atomic_set(&vha->loop_state, LOOP_DOWN);
482 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
483 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
484 }
485
e315cd28
AC
486 if (vha->vp_idx) {
487 atomic_set(&vha->vp_state, VP_FAILED);
488 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
489 }
490
e315cd28 491 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1da177e4
LT
492
493 ha->operating_mode = LOOP;
e315cd28
AC
494 vha->flags.management_server_logged_in = 0;
495 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1da177e4
LT
496 break;
497
498 case MBA_POINT_TO_POINT: /* Point-to-Point */
499 if (IS_QLA2100(ha))
500 break;
501
502 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
e315cd28 503 vha->host_no));
1da177e4
LT
504
505 /*
506 * Until there's a transition from loop down to loop up, treat
507 * this as loop down only.
508 */
e315cd28
AC
509 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
510 atomic_set(&vha->loop_state, LOOP_DOWN);
511 if (!atomic_read(&vha->loop_down_timer))
512 atomic_set(&vha->loop_down_timer,
1da177e4 513 LOOP_DOWN_TIME);
e315cd28 514 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
515 }
516
e315cd28
AC
517 if (vha->vp_idx) {
518 atomic_set(&vha->vp_state, VP_FAILED);
519 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
520 }
521
e315cd28
AC
522 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
523 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
524
525 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
526 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4346b149
AV
527
528 ha->flags.gpsc_supported = 1;
e315cd28 529 vha->flags.management_server_logged_in = 0;
1da177e4
LT
530 break;
531
532 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
533 if (IS_QLA2100(ha))
534 break;
535
1da177e4
LT
536 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
537 "received.\n",
e315cd28 538 vha->host_no));
1da177e4
LT
539 qla_printk(KERN_INFO, ha,
540 "Configuration change detected: value=%x.\n", mb[1]);
541
e315cd28
AC
542 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
543 atomic_set(&vha->loop_state, LOOP_DOWN);
544 if (!atomic_read(&vha->loop_down_timer))
545 atomic_set(&vha->loop_down_timer,
1da177e4 546 LOOP_DOWN_TIME);
e315cd28 547 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4
LT
548 }
549
e315cd28
AC
550 if (vha->vp_idx) {
551 atomic_set(&vha->vp_state, VP_FAILED);
552 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
2c3dfe3f
SJ
553 }
554
e315cd28
AC
555 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
556 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
557 break;
558
559 case MBA_PORT_UPDATE: /* Port database update */
73208dfd
AC
560 /* Only handle SCNs for our Vport index. */
561 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
562 break;
563
1da177e4 564 /*
cc3ef7bc 565 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
1da177e4
LT
566 * event etc. earlier indicating loop is down) then process
567 * it. Otherwise ignore it and Wait for RSCN to come in.
568 */
e315cd28
AC
569 atomic_set(&vha->loop_down_timer, 0);
570 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
571 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1da177e4 572 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
e315cd28 573 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
9a853f71 574 mb[2], mb[3]));
1da177e4
LT
575 break;
576 }
577
578 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
e315cd28 579 vha->host_no));
1da177e4 580 DEBUG(printk(KERN_INFO
9a853f71 581 "scsi(%ld): Port database changed %04x %04x %04x.\n",
e315cd28 582 vha->host_no, mb[1], mb[2], mb[3]));
1da177e4
LT
583
584 /*
585 * Mark all devices as missing so we will login again.
586 */
e315cd28 587 atomic_set(&vha->loop_state, LOOP_UP);
1da177e4 588
e315cd28 589 qla2x00_mark_all_devices_lost(vha, 1);
1da177e4 590
e315cd28 591 vha->flags.rscn_queue_overflow = 1;
1da177e4 592
e315cd28
AC
593 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
594 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1da177e4
LT
595 break;
596
597 case MBA_RSCN_UPDATE: /* State Change Registration */
3c397400 598 /* Check if the Vport has issued a SCR */
e315cd28 599 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
3c397400
SJ
600 break;
601 /* Only handle SCNs for our Vport index. */
e315cd28 602 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
3c397400 603 break;
1da177e4 604 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
e315cd28 605 vha->host_no));
1da177e4 606 DEBUG(printk(KERN_INFO
f4a8dbc7 607 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
e315cd28 608 vha->host_no, mb[1], mb[2], mb[3]));
1da177e4 609
59d72d87 610 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
e315cd28
AC
611 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
612 | vha->d_id.b.al_pa;
1da177e4
LT
613 if (rscn_entry == host_pid) {
614 DEBUG(printk(KERN_INFO
615 "scsi(%ld): Ignoring RSCN update to local host "
616 "port ID (%06x)\n",
e315cd28 617 vha->host_no, host_pid));
1da177e4
LT
618 break;
619 }
620
59d72d87
RA
621 /* Ignore reserved bits from RSCN-payload. */
622 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
e315cd28 623 rscn_queue_index = vha->rscn_in_ptr + 1;
1da177e4
LT
624 if (rscn_queue_index == MAX_RSCN_COUNT)
625 rscn_queue_index = 0;
e315cd28
AC
626 if (rscn_queue_index != vha->rscn_out_ptr) {
627 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
628 vha->rscn_in_ptr = rscn_queue_index;
1da177e4 629 } else {
e315cd28 630 vha->flags.rscn_queue_overflow = 1;
1da177e4
LT
631 }
632
e315cd28
AC
633 atomic_set(&vha->loop_state, LOOP_UPDATE);
634 atomic_set(&vha->loop_down_timer, 0);
635 vha->flags.management_server_logged_in = 0;
1da177e4 636
e315cd28
AC
637 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
638 set_bit(RSCN_UPDATE, &vha->dpc_flags);
639 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1da177e4
LT
640 break;
641
642 /* case MBA_RIO_RESPONSE: */
643 case MBA_ZIO_RESPONSE:
644 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
e315cd28 645 vha->host_no));
1da177e4
LT
646 DEBUG(printk(KERN_INFO
647 "scsi(%ld): [R|Z]IO update completion.\n",
e315cd28 648 vha->host_no));
1da177e4 649
e428924c 650 if (IS_FWI2_CAPABLE(ha))
73208dfd 651 qla24xx_process_response_queue(rsp);
4fdfefe5 652 else
73208dfd 653 qla2x00_process_response_queue(rsp);
1da177e4 654 break;
9a853f71
AV
655
656 case MBA_DISCARD_RND_FRAME:
657 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
e315cd28 658 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
9a853f71 659 break;
45ebeb56
AV
660
661 case MBA_TRACE_NOTIFICATION:
662 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
e315cd28 663 vha->host_no, mb[1], mb[2]));
45ebeb56 664 break;
4d4df193
HK
665
666 case MBA_ISP84XX_ALERT:
667 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
e315cd28 668 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
4d4df193
HK
669
670 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
671 switch (mb[1]) {
672 case A84_PANIC_RECOVERY:
673 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
674 "%04x %04x\n", mb[2], mb[3]);
675 break;
676 case A84_OP_LOGIN_COMPLETE:
677 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
678 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
679 "firmware version %x\n", ha->cs84xx->op_fw_version));
680 break;
681 case A84_DIAG_LOGIN_COMPLETE:
682 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
683 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
684 "diagnostic firmware version %x\n",
685 ha->cs84xx->diag_fw_version));
686 break;
687 case A84_GOLD_LOGIN_COMPLETE:
688 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
689 ha->cs84xx->fw_update = 1;
690 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
691 "firmware version %x\n",
692 ha->cs84xx->gold_fw_version));
693 break;
694 default:
695 qla_printk(KERN_ERR, ha,
696 "Alert 84xx: Invalid Alert %04x %04x %04x\n",
697 mb[1], mb[2], mb[3]);
698 }
699 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
700 break;
1da177e4 701 }
2c3dfe3f 702
e315cd28 703 if (!vha->vp_idx && ha->num_vhosts)
73208dfd 704 qla2x00_alert_all_vps(rsp, mb);
1da177e4
LT
705}
706
df7baa50
AV
707static void
708qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
709{
710 fc_port_t *fcport = data;
73208dfd
AC
711 struct scsi_qla_host *vha = fcport->vha;
712 struct qla_hw_data *ha = vha->hw;
713 struct req_que *req = NULL;
714
715 req = ha->req_q_map[vha->req_ques[0]];
716 if (!req)
717 return;
718 if (req->max_q_depth <= sdev->queue_depth)
df7baa50
AV
719 return;
720
721 if (sdev->ordered_tags)
722 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
723 sdev->queue_depth + 1);
724 else
725 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
726 sdev->queue_depth + 1);
727
728 fcport->last_ramp_up = jiffies;
729
e315cd28 730 DEBUG2(qla_printk(KERN_INFO, ha,
df7baa50 731 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
e315cd28 732 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
df7baa50
AV
733 sdev->queue_depth));
734}
735
736static void
737qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
738{
739 fc_port_t *fcport = data;
740
741 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
742 return;
743
e315cd28 744 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
df7baa50 745 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
e315cd28 746 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
df7baa50
AV
747 sdev->queue_depth));
748}
749
750static inline void
73208dfd
AC
751qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
752 srb_t *sp)
df7baa50
AV
753{
754 fc_port_t *fcport;
755 struct scsi_device *sdev;
756
757 sdev = sp->cmd->device;
73208dfd 758 if (sdev->queue_depth >= req->max_q_depth)
df7baa50
AV
759 return;
760
761 fcport = sp->fcport;
762 if (time_before(jiffies,
763 fcport->last_ramp_up + ql2xqfullrampup * HZ))
764 return;
765 if (time_before(jiffies,
766 fcport->last_queue_full + ql2xqfullrampup * HZ))
767 return;
768
df7baa50
AV
769 starget_for_each_device(sdev->sdev_target, fcport,
770 qla2x00_adjust_sdev_qdepth_up);
df7baa50
AV
771}
772
1da177e4
LT
773/**
774 * qla2x00_process_completed_request() - Process a Fast Post response.
775 * @ha: SCSI driver HA context
776 * @index: SRB index
777 */
778static void
73208dfd
AC
779qla2x00_process_completed_request(struct scsi_qla_host *vha,
780 struct req_que *req, uint32_t index)
1da177e4
LT
781{
782 srb_t *sp;
e315cd28 783 struct qla_hw_data *ha = vha->hw;
1da177e4
LT
784
785 /* Validate handle. */
786 if (index >= MAX_OUTSTANDING_COMMANDS) {
787 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
e315cd28 788 vha->host_no, index));
1da177e4
LT
789 qla_printk(KERN_WARNING, ha,
790 "Invalid SCSI completion handle %d.\n", index);
791
e315cd28 792 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
793 return;
794 }
795
e315cd28 796 sp = req->outstanding_cmds[index];
1da177e4
LT
797 if (sp) {
798 /* Free outstanding command slot. */
e315cd28 799 req->outstanding_cmds[index] = NULL;
1da177e4 800
1da177e4
LT
801 CMD_COMPL_STATUS(sp->cmd) = 0L;
802 CMD_SCSI_STATUS(sp->cmd) = 0L;
803
804 /* Save ISP completion status */
805 sp->cmd->result = DID_OK << 16;
df7baa50 806
73208dfd
AC
807 qla2x00_ramp_up_queue_depth(vha, req, sp);
808 qla2x00_sp_compl(ha, sp);
1da177e4
LT
809 } else {
810 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
e315cd28 811 vha->host_no));
1da177e4
LT
812 qla_printk(KERN_WARNING, ha,
813 "Invalid ISP SCSI completion handle\n");
814
e315cd28 815 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1da177e4
LT
816 }
817}
818
819/**
820 * qla2x00_process_response_queue() - Process response queue entries.
821 * @ha: SCSI driver HA context
822 */
823void
73208dfd 824qla2x00_process_response_queue(struct rsp_que *rsp)
1da177e4 825{
73208dfd
AC
826 struct scsi_qla_host *vha;
827 struct qla_hw_data *ha = rsp->hw;
3d71644c 828 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1da177e4
LT
829 sts_entry_t *pkt;
830 uint16_t handle_cnt;
831 uint16_t cnt;
73208dfd
AC
832
833 vha = qla2x00_get_rsp_host(rsp);
1da177e4 834
e315cd28 835 if (!vha->flags.online)
1da177e4
LT
836 return;
837
e315cd28
AC
838 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
839 pkt = (sts_entry_t *)rsp->ring_ptr;
1da177e4 840
e315cd28
AC
841 rsp->ring_index++;
842 if (rsp->ring_index == rsp->length) {
843 rsp->ring_index = 0;
844 rsp->ring_ptr = rsp->ring;
1da177e4 845 } else {
e315cd28 846 rsp->ring_ptr++;
1da177e4
LT
847 }
848
849 if (pkt->entry_status != 0) {
850 DEBUG3(printk(KERN_INFO
e315cd28 851 "scsi(%ld): Process error entry.\n", vha->host_no));
1da177e4 852
73208dfd 853 qla2x00_error_entry(vha, rsp, pkt);
1da177e4
LT
854 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
855 wmb();
856 continue;
857 }
858
859 switch (pkt->entry_type) {
860 case STATUS_TYPE:
73208dfd 861 qla2x00_status_entry(vha, rsp, pkt);
1da177e4
LT
862 break;
863 case STATUS_TYPE_21:
864 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
865 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 866 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
867 ((sts21_entry_t *)pkt)->handle[cnt]);
868 }
869 break;
870 case STATUS_TYPE_22:
871 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
872 for (cnt = 0; cnt < handle_cnt; cnt++) {
73208dfd 873 qla2x00_process_completed_request(vha, rsp->req,
1da177e4
LT
874 ((sts22_entry_t *)pkt)->handle[cnt]);
875 }
876 break;
877 case STATUS_CONT_TYPE:
e315cd28 878 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1da177e4 879 break;
1da177e4
LT
880 default:
881 /* Type Not Supported. */
882 DEBUG4(printk(KERN_WARNING
883 "scsi(%ld): Received unknown response pkt type %x "
884 "entry status=%x.\n",
e315cd28 885 vha->host_no, pkt->entry_type, pkt->entry_status));
1da177e4
LT
886 break;
887 }
888 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
889 wmb();
890 }
891
892 /* Adjust ring index */
e315cd28 893 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1da177e4
LT
894}
895
4733fcb1
AV
896static inline void
897qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
898{
899 struct scsi_cmnd *cp = sp->cmd;
900
901 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
902 sense_len = SCSI_SENSE_BUFFERSIZE;
903
904 CMD_ACTUAL_SNSLEN(cp) = sense_len;
905 sp->request_sense_length = sense_len;
906 sp->request_sense_ptr = cp->sense_buffer;
907 if (sp->request_sense_length > 32)
908 sense_len = 32;
909
910 memcpy(cp->sense_buffer, sense_data, sense_len);
911
912 sp->request_sense_ptr += sense_len;
913 sp->request_sense_length -= sense_len;
914 if (sp->request_sense_length != 0)
e315cd28 915 sp->fcport->vha->status_srb = sp;
4733fcb1
AV
916
917 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
e315cd28 918 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
19851f13
AV
919 cp->device->channel, cp->device->id, cp->device->lun, cp,
920 cp->serial_number));
4733fcb1
AV
921 if (sense_len)
922 DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
923 CMD_ACTUAL_SNSLEN(cp)));
924}
925
1da177e4
LT
926/**
927 * qla2x00_status_entry() - Process a Status IOCB entry.
928 * @ha: SCSI driver HA context
929 * @pkt: Entry pointer
930 */
931static void
73208dfd 932qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1da177e4 933{
1da177e4 934 srb_t *sp;
1da177e4
LT
935 fc_port_t *fcport;
936 struct scsi_cmnd *cp;
9a853f71
AV
937 sts_entry_t *sts;
938 struct sts_entry_24xx *sts24;
1da177e4
LT
939 uint16_t comp_status;
940 uint16_t scsi_status;
941 uint8_t lscsi_status;
942 int32_t resid;
ed17c71b 943 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
9a853f71 944 uint8_t *rsp_info, *sense_data;
e315cd28 945 struct qla_hw_data *ha = vha->hw;
73208dfd 946 struct req_que *req = rsp->req;
9a853f71
AV
947
948 sts = (sts_entry_t *) pkt;
949 sts24 = (struct sts_entry_24xx *) pkt;
e428924c 950 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
951 comp_status = le16_to_cpu(sts24->comp_status);
952 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
953 } else {
954 comp_status = le16_to_cpu(sts->comp_status);
955 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
956 }
1da177e4
LT
957
958 /* Fast path completion. */
9a853f71 959 if (comp_status == CS_COMPLETE && scsi_status == 0) {
73208dfd 960 qla2x00_process_completed_request(vha, req, sts->handle);
1da177e4
LT
961
962 return;
963 }
964
965 /* Validate handle. */
9a853f71 966 if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
e315cd28
AC
967 sp = req->outstanding_cmds[sts->handle];
968 req->outstanding_cmds[sts->handle] = NULL;
1da177e4
LT
969 } else
970 sp = NULL;
971
972 if (sp == NULL) {
973 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
e315cd28 974 vha->host_no));
1da177e4
LT
975 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
976
e315cd28
AC
977 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
978 qla2xxx_wake_dpc(vha);
1da177e4
LT
979 return;
980 }
981 cp = sp->cmd;
982 if (cp == NULL) {
983 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
e315cd28 984 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
1da177e4
LT
985 qla_printk(KERN_WARNING, ha,
986 "Command is NULL: already returned to OS (sp=%p)\n", sp);
987
988 return;
989 }
990
9a853f71
AV
991 lscsi_status = scsi_status & STATUS_MASK;
992 CMD_ENTRY_STATUS(cp) = sts->entry_status;
1da177e4
LT
993 CMD_COMPL_STATUS(cp) = comp_status;
994 CMD_SCSI_STATUS(cp) = scsi_status;
995
bdf79621 996 fcport = sp->fcport;
1da177e4 997
ed17c71b 998 sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
e428924c 999 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1000 sense_len = le32_to_cpu(sts24->sense_len);
1001 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1002 resid_len = le32_to_cpu(sts24->rsp_residual_count);
ed17c71b 1003 fw_resid_len = le32_to_cpu(sts24->residual_len);
9a853f71
AV
1004 rsp_info = sts24->data;
1005 sense_data = sts24->data;
1006 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
1007 } else {
1008 sense_len = le16_to_cpu(sts->req_sense_length);
1009 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
1010 resid_len = le32_to_cpu(sts->residual_length);
1011 rsp_info = sts->rsp_info;
1012 sense_data = sts->req_sense_data;
1013 }
1014
1da177e4
LT
1015 /* Check for any FCP transport errors. */
1016 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
9a853f71 1017 /* Sense data lies beyond any FCP RESPONSE data. */
e428924c 1018 if (IS_FWI2_CAPABLE(ha))
9a853f71
AV
1019 sense_data += rsp_info_len;
1020 if (rsp_info_len > 3 && rsp_info[3]) {
1da177e4
LT
1021 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1022 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
e315cd28 1023 "retrying command\n", vha->host_no,
9a853f71
AV
1024 cp->device->channel, cp->device->id,
1025 cp->device->lun, rsp_info_len, rsp_info[0],
1026 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1027 rsp_info[5], rsp_info[6], rsp_info[7]));
1da177e4
LT
1028
1029 cp->result = DID_BUS_BUSY << 16;
73208dfd 1030 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1031 return;
1032 }
1033 }
1034
3e8ce320
AV
1035 /* Check for overrun. */
1036 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1037 scsi_status & SS_RESIDUAL_OVER)
1038 comp_status = CS_DATA_OVERRUN;
1039
1da177e4
LT
1040 /*
1041 * Based on Host and scsi status generate status code for Linux
1042 */
1043 switch (comp_status) {
1044 case CS_COMPLETE:
df7baa50 1045 case CS_QUEUE_FULL:
1da177e4
LT
1046 if (scsi_status == 0) {
1047 cp->result = DID_OK << 16;
1048 break;
1049 }
1050 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
9a853f71 1051 resid = resid_len;
385d70b4 1052 scsi_set_resid(cp, resid);
1da177e4 1053 CMD_RESID_LEN(cp) = resid;
0da69df1
AV
1054
1055 if (!lscsi_status &&
385d70b4 1056 ((unsigned)(scsi_bufflen(cp) - resid) <
0da69df1
AV
1057 cp->underflow)) {
1058 qla_printk(KERN_INFO, ha,
385d70b4
FT
1059 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1060 "detected (%x of %x bytes)...returning "
e315cd28 1061 "error status.\n", vha->host_no,
385d70b4
FT
1062 cp->device->channel, cp->device->id,
1063 cp->device->lun, resid,
1064 scsi_bufflen(cp));
0da69df1
AV
1065
1066 cp->result = DID_ERROR << 16;
1067 break;
1068 }
1da177e4 1069 }
1da177e4
LT
1070 cp->result = DID_OK << 16 | lscsi_status;
1071
df7baa50
AV
1072 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1073 DEBUG2(printk(KERN_INFO
1074 "scsi(%ld): QUEUE FULL status detected "
e315cd28 1075 "0x%x-0x%x.\n", vha->host_no, comp_status,
df7baa50
AV
1076 scsi_status));
1077
1078 /* Adjust queue depth for all luns on the port. */
1079 fcport->last_queue_full = jiffies;
df7baa50
AV
1080 starget_for_each_device(cp->device->sdev_target,
1081 fcport, qla2x00_adjust_sdev_qdepth_down);
df7baa50
AV
1082 break;
1083 }
1da177e4
LT
1084 if (lscsi_status != SS_CHECK_CONDITION)
1085 break;
1086
b80ca4f7 1087 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1088 if (!(scsi_status & SS_SENSE_LEN_VALID))
1089 break;
1090
4733fcb1 1091 qla2x00_handle_sense(sp, sense_data, sense_len);
1da177e4
LT
1092 break;
1093
1094 case CS_DATA_UNDERRUN:
9a853f71 1095 resid = resid_len;
ed17c71b 1096 /* Use F/W calculated residual length. */
6acf8190 1097 if (IS_FWI2_CAPABLE(ha)) {
2d136938
AV
1098 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1099 lscsi_status = 0;
1100 } else if (resid != fw_resid_len) {
6acf8190
AV
1101 scsi_status &= ~SS_RESIDUAL_UNDER;
1102 lscsi_status = 0;
1103 }
ed17c71b 1104 resid = fw_resid_len;
6acf8190 1105 }
ed17c71b 1106
1da177e4 1107 if (scsi_status & SS_RESIDUAL_UNDER) {
385d70b4 1108 scsi_set_resid(cp, resid);
1da177e4 1109 CMD_RESID_LEN(cp) = resid;
e038a1be
AV
1110 } else {
1111 DEBUG2(printk(KERN_INFO
1112 "scsi(%ld:%d:%d) UNDERRUN status detected "
ed17c71b 1113 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
e315cd28 1114 "os_underflow=0x%x\n", vha->host_no,
ed17c71b
RA
1115 cp->device->id, cp->device->lun, comp_status,
1116 scsi_status, resid_len, resid, cp->cmnd[0],
1117 cp->underflow));
e038a1be 1118
1da177e4
LT
1119 }
1120
1121 /*
fa2a1ce5 1122 * Check to see if SCSI Status is non zero. If so report SCSI
1da177e4
LT
1123 * Status.
1124 */
1125 if (lscsi_status != 0) {
1da177e4
LT
1126 cp->result = DID_OK << 16 | lscsi_status;
1127
ffec28a3
AV
1128 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1129 DEBUG2(printk(KERN_INFO
1130 "scsi(%ld): QUEUE FULL status detected "
e315cd28 1131 "0x%x-0x%x.\n", vha->host_no, comp_status,
ffec28a3
AV
1132 scsi_status));
1133
1134 /*
1135 * Adjust queue depth for all luns on the
1136 * port.
1137 */
1138 fcport->last_queue_full = jiffies;
1139 starget_for_each_device(
1140 cp->device->sdev_target, fcport,
1141 qla2x00_adjust_sdev_qdepth_down);
1142 break;
1143 }
1da177e4
LT
1144 if (lscsi_status != SS_CHECK_CONDITION)
1145 break;
1146
b80ca4f7 1147 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1da177e4
LT
1148 if (!(scsi_status & SS_SENSE_LEN_VALID))
1149 break;
1150
4733fcb1 1151 qla2x00_handle_sense(sp, sense_data, sense_len);
1da177e4
LT
1152 } else {
1153 /*
1154 * If RISC reports underrun and target does not report
1155 * it then we must have a lost frame, so tell upper
1156 * layer to retry it by reporting a bus busy.
1157 */
1158 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1159 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
385d70b4 1160 "frame(s) detected (%x of %x bytes)..."
e315cd28
AC
1161 "retrying command.\n",
1162 vha->host_no, cp->device->channel,
1163 cp->device->id, cp->device->lun, resid,
1164 scsi_bufflen(cp)));
1da177e4
LT
1165
1166 cp->result = DID_BUS_BUSY << 16;
1da177e4
LT
1167 break;
1168 }
1169
1170 /* Handle mid-layer underflow */
385d70b4 1171 if ((unsigned)(scsi_bufflen(cp) - resid) <
1da177e4
LT
1172 cp->underflow) {
1173 qla_printk(KERN_INFO, ha,
385d70b4
FT
1174 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1175 "detected (%x of %x bytes)...returning "
e315cd28 1176 "error status.\n", vha->host_no,
385d70b4
FT
1177 cp->device->channel, cp->device->id,
1178 cp->device->lun, resid,
1179 scsi_bufflen(cp));
1da177e4
LT
1180
1181 cp->result = DID_ERROR << 16;
1182 break;
1183 }
1184
1185 /* Everybody online, looking good... */
1186 cp->result = DID_OK << 16;
1187 }
1188 break;
1189
1190 case CS_DATA_OVERRUN:
1191 DEBUG2(printk(KERN_INFO
1192 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
e315cd28 1193 vha->host_no, cp->device->id, cp->device->lun, comp_status,
9a853f71 1194 scsi_status));
1da177e4
LT
1195 DEBUG2(printk(KERN_INFO
1196 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1197 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1198 cp->cmnd[4], cp->cmnd[5]));
1199 DEBUG2(printk(KERN_INFO
1200 "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1201 "status!\n",
385d70b4 1202 cp->serial_number, scsi_bufflen(cp), resid_len));
1da177e4
LT
1203
1204 cp->result = DID_ERROR << 16;
1205 break;
1206
1207 case CS_PORT_LOGGED_OUT:
1208 case CS_PORT_CONFIG_CHG:
1209 case CS_PORT_BUSY:
1210 case CS_INCOMPLETE:
1211 case CS_PORT_UNAVAILABLE:
1212 /*
1213 * If the port is in Target Down state, return all IOs for this
1214 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1215 * retry_queue.
1216 */
1da177e4
LT
1217 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1218 "pid=%ld, compl status=0x%x, port state=0x%x\n",
e315cd28 1219 vha->host_no, cp->device->id, cp->device->lun,
9a853f71 1220 cp->serial_number, comp_status,
1da177e4
LT
1221 atomic_read(&fcport->state)));
1222
056a4483
MC
1223 /*
1224 * We are going to have the fc class block the rport
1225 * while we try to recover so instruct the mid layer
1226 * to requeue until the class decides how to handle this.
1227 */
1228 cp->result = DID_TRANSPORT_DISRUPTED << 16;
a7a28504 1229 if (atomic_read(&fcport->state) == FCS_ONLINE)
e315cd28 1230 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
1231 break;
1232
1233 case CS_RESET:
1234 DEBUG2(printk(KERN_INFO
1235 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
e315cd28 1236 vha->host_no, comp_status, scsi_status));
1da177e4 1237
f4f051eb 1238 cp->result = DID_RESET << 16;
1da177e4
LT
1239 break;
1240
1241 case CS_ABORTED:
fa2a1ce5 1242 /*
1da177e4
LT
1243 * hv2.19.12 - DID_ABORT does not retry the request if we
1244 * aborted this request then abort otherwise it must be a
1245 * reset.
1246 */
1247 DEBUG2(printk(KERN_INFO
1248 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
e315cd28 1249 vha->host_no, comp_status, scsi_status));
1da177e4
LT
1250
1251 cp->result = DID_RESET << 16;
1252 break;
1253
1254 case CS_TIMEOUT:
056a4483
MC
1255 /*
1256 * We are going to have the fc class block the rport
1257 * while we try to recover so instruct the mid layer
1258 * to requeue until the class decides how to handle this.
1259 */
1260 cp->result = DID_TRANSPORT_DISRUPTED << 16;
9a853f71 1261
e428924c 1262 if (IS_FWI2_CAPABLE(ha)) {
9a853f71
AV
1263 DEBUG2(printk(KERN_INFO
1264 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
e315cd28 1265 "0x%x-0x%x\n", vha->host_no, cp->device->channel,
9a853f71
AV
1266 cp->device->id, cp->device->lun, comp_status,
1267 scsi_status));
1268 break;
1269 }
1da177e4
LT
1270 DEBUG2(printk(KERN_INFO
1271 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
e315cd28 1272 "sflags=%x.\n", vha->host_no, cp->device->channel,
9a853f71
AV
1273 cp->device->id, cp->device->lun, comp_status, scsi_status,
1274 le16_to_cpu(sts->status_flags)));
1da177e4 1275
9a853f71
AV
1276 /* Check to see if logout occurred. */
1277 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
e315cd28 1278 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1da177e4
LT
1279 break;
1280
1da177e4
LT
1281 default:
1282 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
e315cd28 1283 "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1da177e4
LT
1284 qla_printk(KERN_INFO, ha,
1285 "Unknown status detected 0x%x-0x%x.\n",
1286 comp_status, scsi_status);
1287
1288 cp->result = DID_ERROR << 16;
1289 break;
1290 }
1291
1292 /* Place command on done queue. */
e315cd28 1293 if (vha->status_srb == NULL)
73208dfd 1294 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1295}
1296
1297/**
1298 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1299 * @ha: SCSI driver HA context
1300 * @pkt: Entry pointer
1301 *
1302 * Extended sense data.
1303 */
1304static void
e315cd28 1305qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1da177e4
LT
1306{
1307 uint8_t sense_sz = 0;
e315cd28
AC
1308 struct qla_hw_data *ha = vha->hw;
1309 srb_t *sp = vha->status_srb;
1da177e4
LT
1310 struct scsi_cmnd *cp;
1311
1312 if (sp != NULL && sp->request_sense_length != 0) {
1313 cp = sp->cmd;
1314 if (cp == NULL) {
1315 DEBUG2(printk("%s(): Cmd already returned back to OS "
75bc4190 1316 "sp=%p.\n", __func__, sp));
1da177e4
LT
1317 qla_printk(KERN_INFO, ha,
1318 "cmd is NULL: already returned to OS (sp=%p)\n",
fa2a1ce5 1319 sp);
1da177e4 1320
e315cd28 1321 vha->status_srb = NULL;
1da177e4
LT
1322 return;
1323 }
1324
1325 if (sp->request_sense_length > sizeof(pkt->data)) {
1326 sense_sz = sizeof(pkt->data);
1327 } else {
1328 sense_sz = sp->request_sense_length;
1329 }
1330
1331 /* Move sense data. */
e428924c 1332 if (IS_FWI2_CAPABLE(ha))
9a853f71 1333 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1da177e4
LT
1334 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1335 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1336
1337 sp->request_sense_ptr += sense_sz;
1338 sp->request_sense_length -= sense_sz;
1339
1340 /* Place command on done queue. */
1341 if (sp->request_sense_length == 0) {
e315cd28 1342 vha->status_srb = NULL;
73208dfd 1343 qla2x00_sp_compl(ha, sp);
1da177e4
LT
1344 }
1345 }
1346}
1347
1348/**
1349 * qla2x00_error_entry() - Process an error entry.
1350 * @ha: SCSI driver HA context
1351 * @pkt: Entry pointer
1352 */
1353static void
73208dfd 1354qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1da177e4
LT
1355{
1356 srb_t *sp;
e315cd28 1357 struct qla_hw_data *ha = vha->hw;
73208dfd 1358 struct req_que *req = rsp->req;
1da177e4
LT
1359#if defined(QL_DEBUG_LEVEL_2)
1360 if (pkt->entry_status & RF_INV_E_ORDER)
1361 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1362 else if (pkt->entry_status & RF_INV_E_COUNT)
1363 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1364 else if (pkt->entry_status & RF_INV_E_PARAM)
fa2a1ce5 1365 qla_printk(KERN_ERR, ha,
1da177e4
LT
1366 "%s: Invalid Entry Parameter\n", __func__);
1367 else if (pkt->entry_status & RF_INV_E_TYPE)
1368 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1369 else if (pkt->entry_status & RF_BUSY)
1370 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1371 else
1372 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1373#endif
1374
1375 /* Validate handle. */
1376 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
e315cd28 1377 sp = req->outstanding_cmds[pkt->handle];
1da177e4
LT
1378 else
1379 sp = NULL;
1380
1381 if (sp) {
1382 /* Free outstanding command slot. */
e315cd28 1383 req->outstanding_cmds[pkt->handle] = NULL;
354d6b21 1384
1da177e4
LT
1385 /* Bad payload or header */
1386 if (pkt->entry_status &
1387 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1388 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1389 sp->cmd->result = DID_ERROR << 16;
1390 } else if (pkt->entry_status & RF_BUSY) {
1391 sp->cmd->result = DID_BUS_BUSY << 16;
1392 } else {
1393 sp->cmd->result = DID_ERROR << 16;
1394 }
73208dfd 1395 qla2x00_sp_compl(ha, sp);
1da177e4 1396
9a853f71
AV
1397 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1398 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1da177e4 1399 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
e315cd28 1400 vha->host_no));
1da177e4
LT
1401 qla_printk(KERN_WARNING, ha,
1402 "Error entry - invalid handle\n");
1403
e315cd28
AC
1404 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1405 qla2xxx_wake_dpc(vha);
1da177e4
LT
1406 }
1407}
1408
9a853f71
AV
1409/**
1410 * qla24xx_mbx_completion() - Process mailbox command completions.
1411 * @ha: SCSI driver HA context
1412 * @mb0: Mailbox0 register
1413 */
1414static void
e315cd28 1415qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
9a853f71
AV
1416{
1417 uint16_t cnt;
1418 uint16_t __iomem *wptr;
e315cd28 1419 struct qla_hw_data *ha = vha->hw;
9a853f71
AV
1420 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1421
1422 /* Load return mailbox registers. */
1423 ha->flags.mbox_int = 1;
1424 ha->mailbox_out[0] = mb0;
1425 wptr = (uint16_t __iomem *)&reg->mailbox1;
1426
1427 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1428 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1429 wptr++;
1430 }
1431
1432 if (ha->mcp) {
1433 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
e315cd28 1434 __func__, vha->host_no, ha->mcp->mb[0]));
9a853f71
AV
1435 } else {
1436 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
e315cd28 1437 __func__, vha->host_no));
9a853f71
AV
1438 }
1439}
1440
1441/**
1442 * qla24xx_process_response_queue() - Process response queue entries.
1443 * @ha: SCSI driver HA context
1444 */
1445void
73208dfd 1446qla24xx_process_response_queue(struct rsp_que *rsp)
9a853f71 1447{
73208dfd 1448 struct qla_hw_data *ha = rsp->hw;
9a853f71 1449 struct sts_entry_24xx *pkt;
73208dfd
AC
1450 struct scsi_qla_host *vha;
1451
1452 vha = qla2x00_get_rsp_host(rsp);
9a853f71 1453
e315cd28 1454 if (!vha->flags.online)
9a853f71
AV
1455 return;
1456
e315cd28
AC
1457 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1458 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
9a853f71 1459
e315cd28
AC
1460 rsp->ring_index++;
1461 if (rsp->ring_index == rsp->length) {
1462 rsp->ring_index = 0;
1463 rsp->ring_ptr = rsp->ring;
9a853f71 1464 } else {
e315cd28 1465 rsp->ring_ptr++;
9a853f71
AV
1466 }
1467
1468 if (pkt->entry_status != 0) {
1469 DEBUG3(printk(KERN_INFO
e315cd28 1470 "scsi(%ld): Process error entry.\n", vha->host_no));
9a853f71 1471
73208dfd 1472 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
9a853f71
AV
1473 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1474 wmb();
1475 continue;
1476 }
1477
1478 switch (pkt->entry_type) {
1479 case STATUS_TYPE:
73208dfd 1480 qla2x00_status_entry(vha, rsp, pkt);
9a853f71
AV
1481 break;
1482 case STATUS_CONT_TYPE:
e315cd28 1483 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
9a853f71 1484 break;
2c3dfe3f 1485 case VP_RPT_ID_IOCB_TYPE:
e315cd28 1486 qla24xx_report_id_acquisition(vha,
2c3dfe3f
SJ
1487 (struct vp_rpt_id_entry_24xx *)pkt);
1488 break;
9a853f71
AV
1489 default:
1490 /* Type Not Supported. */
1491 DEBUG4(printk(KERN_WARNING
1492 "scsi(%ld): Received unknown response pkt type %x "
1493 "entry status=%x.\n",
e315cd28 1494 vha->host_no, pkt->entry_type, pkt->entry_status));
9a853f71
AV
1495 break;
1496 }
1497 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1498 wmb();
1499 }
1500
1501 /* Adjust ring index */
17d98630 1502 ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
9a853f71
AV
1503}
1504
05236a05 1505static void
e315cd28 1506qla2xxx_check_risc_status(scsi_qla_host_t *vha)
05236a05
AV
1507{
1508 int rval;
1509 uint32_t cnt;
e315cd28 1510 struct qla_hw_data *ha = vha->hw;
05236a05
AV
1511 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1512
1513 if (!IS_QLA25XX(ha))
1514 return;
1515
1516 rval = QLA_SUCCESS;
1517 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1518 RD_REG_DWORD(&reg->iobase_addr);
1519 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1520 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1521 rval == QLA_SUCCESS; cnt--) {
1522 if (cnt) {
1523 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1524 udelay(10);
1525 } else
1526 rval = QLA_FUNCTION_TIMEOUT;
1527 }
1528 if (rval == QLA_SUCCESS)
1529 goto next_test;
1530
1531 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1532 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1533 rval == QLA_SUCCESS; cnt--) {
1534 if (cnt) {
1535 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1536 udelay(10);
1537 } else
1538 rval = QLA_FUNCTION_TIMEOUT;
1539 }
1540 if (rval != QLA_SUCCESS)
1541 goto done;
1542
1543next_test:
1544 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1545 qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1546
1547done:
1548 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1549 RD_REG_DWORD(&reg->iobase_window);
1550}
1551
9a853f71
AV
1552/**
1553 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
1554 * @irq:
1555 * @dev_id: SCSI driver HA context
9a853f71
AV
1556 *
1557 * Called by system whenever the host adapter generates an interrupt.
1558 *
1559 * Returns handled flag.
1560 */
1561irqreturn_t
7d12e780 1562qla24xx_intr_handler(int irq, void *dev_id)
9a853f71 1563{
e315cd28
AC
1564 scsi_qla_host_t *vha;
1565 struct qla_hw_data *ha;
9a853f71
AV
1566 struct device_reg_24xx __iomem *reg;
1567 int status;
9a853f71
AV
1568 unsigned long iter;
1569 uint32_t stat;
1570 uint32_t hccr;
1571 uint16_t mb[4];
e315cd28 1572 struct rsp_que *rsp;
9a853f71 1573
e315cd28
AC
1574 rsp = (struct rsp_que *) dev_id;
1575 if (!rsp) {
9a853f71 1576 printk(KERN_INFO
e315cd28 1577 "%s(): NULL response queue pointer\n", __func__);
9a853f71
AV
1578 return IRQ_NONE;
1579 }
1580
e315cd28 1581 ha = rsp->hw;
9a853f71
AV
1582 reg = &ha->iobase->isp24;
1583 status = 0;
1584
c6952483 1585 spin_lock(&ha->hardware_lock);
e315cd28 1586 vha = qla2x00_get_rsp_host(rsp);
9a853f71
AV
1587 for (iter = 50; iter--; ) {
1588 stat = RD_REG_DWORD(&reg->host_status);
1589 if (stat & HSRX_RISC_PAUSED) {
14e660e6
SJ
1590 if (pci_channel_offline(ha->pdev))
1591 break;
1592
cb8dacbf 1593 if (ha->hw_event_pause_errors == 0)
e315cd28 1594 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
cb8dacbf
AV
1595 0, MSW(stat), LSW(stat));
1596 else if (ha->hw_event_pause_errors < 0xffffffff)
1597 ha->hw_event_pause_errors++;
1598
9a853f71
AV
1599 hccr = RD_REG_DWORD(&reg->hccr);
1600
1601 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1602 "Dumping firmware!\n", hccr);
05236a05 1603
e315cd28 1604 qla2xxx_check_risc_status(vha);
05236a05 1605
e315cd28
AC
1606 ha->isp_ops->fw_dump(vha, 1);
1607 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9a853f71
AV
1608 break;
1609 } else if ((stat & HSRX_RISC_INT) == 0)
1610 break;
1611
1612 switch (stat & 0xff) {
1613 case 0x1:
1614 case 0x2:
1615 case 0x10:
1616 case 0x11:
e315cd28 1617 qla24xx_mbx_completion(vha, MSW(stat));
9a853f71
AV
1618 status |= MBX_INTERRUPT;
1619
1620 break;
1621 case 0x12:
1622 mb[0] = MSW(stat);
1623 mb[1] = RD_REG_WORD(&reg->mailbox1);
1624 mb[2] = RD_REG_WORD(&reg->mailbox2);
1625 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 1626 qla2x00_async_event(vha, rsp, mb);
9a853f71
AV
1627 break;
1628 case 0x13:
73208dfd
AC
1629 case 0x14:
1630 qla24xx_process_response_queue(rsp);
9a853f71
AV
1631 break;
1632 default:
1633 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1634 "(%d).\n",
e315cd28 1635 vha->host_no, stat & 0xff));
9a853f71
AV
1636 break;
1637 }
1638 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1639 RD_REG_DWORD_RELAXED(&reg->hccr);
1640 }
c6952483 1641 spin_unlock(&ha->hardware_lock);
9a853f71
AV
1642
1643 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1644 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
9a853f71 1645 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 1646 complete(&ha->mbx_intr_comp);
9a853f71
AV
1647 }
1648
1649 return IRQ_HANDLED;
1650}
1651
a8488abe
AV
1652static irqreturn_t
1653qla24xx_msix_rsp_q(int irq, void *dev_id)
1654{
e315cd28
AC
1655 struct qla_hw_data *ha;
1656 struct rsp_que *rsp;
a8488abe 1657 struct device_reg_24xx __iomem *reg;
a8488abe 1658
e315cd28
AC
1659 rsp = (struct rsp_que *) dev_id;
1660 if (!rsp) {
1661 printk(KERN_INFO
1662 "%s(): NULL response queue pointer\n", __func__);
1663 return IRQ_NONE;
1664 }
1665 ha = rsp->hw;
a8488abe
AV
1666 reg = &ha->iobase->isp24;
1667
0e973a24 1668 spin_lock_irq(&ha->hardware_lock);
a8488abe 1669
73208dfd 1670 qla24xx_process_response_queue(rsp);
a8488abe 1671 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
a8488abe 1672
0e973a24 1673 spin_unlock_irq(&ha->hardware_lock);
a8488abe
AV
1674
1675 return IRQ_HANDLED;
1676}
1677
73208dfd
AC
1678static irqreturn_t
1679qla25xx_msix_rsp_q(int irq, void *dev_id)
1680{
1681 struct qla_hw_data *ha;
1682 struct rsp_que *rsp;
1683 struct device_reg_24xx __iomem *reg;
1684 uint16_t msix_disabled_hccr = 0;
1685
1686 rsp = (struct rsp_que *) dev_id;
1687 if (!rsp) {
1688 printk(KERN_INFO
1689 "%s(): NULL response queue pointer\n", __func__);
1690 return IRQ_NONE;
1691 }
1692 ha = rsp->hw;
1693 reg = &ha->iobase->isp24;
1694
1695 spin_lock_irq(&ha->hardware_lock);
1696
1697 msix_disabled_hccr = rsp->options;
1698 if (!rsp->id)
1699 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1700 else
17d98630 1701 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
73208dfd
AC
1702
1703 qla24xx_process_response_queue(rsp);
1704
1705 if (!msix_disabled_hccr)
1706 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1707
1708 spin_unlock_irq(&ha->hardware_lock);
1709
1710 return IRQ_HANDLED;
1711}
1712
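/*
 * qla24xx_msix_default() backs MSI-X vector 0.  It makes a single pass
 * over the host status register: a paused RISC triggers a firmware dump
 * and an ISP abort request, mailbox and async-event interrupts are
 * dispatched to their handlers, and response-queue interrupts fall
 * through to qla24xx_process_response_queue().  As in the INTx handler
 * above, a waiting mailbox command is woken via ha->mbx_intr_comp.
 */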
a8488abe
AV
1713static irqreturn_t
1714qla24xx_msix_default(int irq, void *dev_id)
1715{
e315cd28
AC
1716 scsi_qla_host_t *vha;
1717 struct qla_hw_data *ha;
1718 struct rsp_que *rsp;
a8488abe
AV
1719 struct device_reg_24xx __iomem *reg;
1720 int status;
a8488abe
AV
1721 uint32_t stat;
1722 uint32_t hccr;
1723 uint16_t mb[4];
1724
e315cd28
AC
1725 rsp = (struct rsp_que *) dev_id;
1726 if (!rsp) {
1727 DEBUG(printk(
1728 "%s(): NULL response queue pointer\n", __func__));
1729 return IRQ_NONE;
1730 }
1731 ha = rsp->hw;
a8488abe
AV
1732 reg = &ha->iobase->isp24;
1733 status = 0;
1734
0e973a24 1735 spin_lock_irq(&ha->hardware_lock);
e315cd28 1736 vha = qla2x00_get_rsp_host(rsp);
87f27015 1737 do {
a8488abe
AV
1738 stat = RD_REG_DWORD(&reg->host_status);
1739 if (stat & HSRX_RISC_PAUSED) {
14e660e6
SJ
1740 if (pci_channel_offline(ha->pdev))
1741 break;
1742
cb8dacbf 1743 if (ha->hw_event_pause_errors == 0)
e315cd28 1744 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
cb8dacbf
AV
1745 0, MSW(stat), LSW(stat));
1746 else if (ha->hw_event_pause_errors < 0xffffffff)
1747 ha->hw_event_pause_errors++;
1748
a8488abe
AV
1749 hccr = RD_REG_DWORD(&reg->hccr);
1750
1751 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1752 "Dumping firmware!\n", hccr);
05236a05 1753
e315cd28 1754 qla2xxx_check_risc_status(vha);
05236a05 1755
e315cd28
AC
1756 ha->isp_ops->fw_dump(vha, 1);
1757 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
a8488abe
AV
1758 break;
1759 } else if ((stat & HSRX_RISC_INT) == 0)
1760 break;
1761
1762 switch (stat & 0xff) {
1763 case 0x1:
1764 case 0x2:
1765 case 0x10:
1766 case 0x11:
e315cd28 1767 qla24xx_mbx_completion(vha, MSW(stat));
a8488abe
AV
1768 status |= MBX_INTERRUPT;
1769
1770 break;
1771 case 0x12:
1772 mb[0] = MSW(stat);
1773 mb[1] = RD_REG_WORD(&reg->mailbox1);
1774 mb[2] = RD_REG_WORD(&reg->mailbox2);
1775 mb[3] = RD_REG_WORD(&reg->mailbox3);
73208dfd 1776 qla2x00_async_event(vha, rsp, mb);
a8488abe
AV
1777 break;
1778 case 0x13:
73208dfd
AC
1779 case 0x14:
1780 qla24xx_process_response_queue(rsp);
a8488abe
AV
1781 break;
1782 default:
1783 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1784 "(%d).\n",
e315cd28 1785 vha->host_no, stat & 0xff));
a8488abe
AV
1786 break;
1787 }
1788 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
87f27015 1789 } while (0);
0e973a24 1790 spin_unlock_irq(&ha->hardware_lock);
a8488abe
AV
1791
1792 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1793 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
a8488abe 1794 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
0b05a1f0 1795 complete(&ha->mbx_intr_comp);
a8488abe
AV
1796 }
1797
1798 return IRQ_HANDLED;
1799}
1800
1801/* Interrupt handling helpers. */
1802
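/*
 * Each qla_init_msix_entry describes one vector to request: its entry
 * and index in the MSI-X table, the name passed to request_irq(), and
 * the handler to install.
 */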
1803struct qla_init_msix_entry {
1804 uint16_t entry;
1805 uint16_t index;
1806 const char *name;
476834c2 1807 irq_handler_t handler;
a8488abe
AV
1808};
1809
73208dfd
AC
1810static struct qla_init_msix_entry base_queue = {
1811 .entry = 0,
1812 .index = 0,
1813 .name = "qla2xxx (default)",
1814 .handler = qla24xx_msix_default,
1815};
1816
1817static struct qla_init_msix_entry base_rsp_queue = {
1818 .entry = 1,
1819 .index = 1,
1820 .name = "qla2xxx (rsp_q)",
1821 .handler = qla24xx_msix_rsp_q,
1822};
a8488abe 1823
73208dfd
AC
1824static struct qla_init_msix_entry multi_rsp_queue = {
1825 .entry = 1,
1826 .index = 1,
1827 .name = "qla2xxx (multi_q)",
1828 .handler = qla25xx_msix_rsp_q,
a8488abe
AV
1829};
1830
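/*
 * The three templates above cover the supported layouts: vector 0 is
 * always base_queue (mailbox completions, async events and the base
 * response ring); vector 1 is base_rsp_queue on single-queue setups, or
 * multi_rsp_queue when multiple response queues are enabled (see
 * qla24xx_enable_msix() below).
 */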
1831static void
e315cd28 1832qla24xx_disable_msix(struct qla_hw_data *ha)
a8488abe
AV
1833{
1834 int i;
1835 struct qla_msix_entry *qentry;
1836
73208dfd
AC
1837 for (i = 0; i < ha->msix_count; i++) {
1838 qentry = &ha->msix_entries[i];
a8488abe 1839 if (qentry->have_irq)
73208dfd 1840 free_irq(qentry->vector, qentry->rsp);
a8488abe
AV
1841 }
1842 pci_disable_msix(ha->pdev);
73208dfd
AC
1843 kfree(ha->msix_entries);
1844 ha->msix_entries = NULL;
1845 ha->flags.msix_enabled = 0;
a8488abe
AV
1846}
1847
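/*
 * qla24xx_enable_msix() allocates ha->msix_count vectors with
 * pci_enable_msix(), retrying once with the smaller count reported by
 * the first call and trimming ha->max_queues accordingly.  Vector 0 is
 * then registered with the default handler and vector 1 with either the
 * single-queue or the multiqueue response handler; ha->mqenable records
 * which of the two was chosen.
 */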
1848static int
73208dfd 1849qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
1850{
1851 int i, ret;
73208dfd 1852 struct msix_entry *entries;
a8488abe 1853 struct qla_msix_entry *qentry;
73208dfd
AC
1854 struct qla_init_msix_entry *msix_queue;
1855
1856 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1857 GFP_KERNEL);
1858 if (!entries)
1859 return -ENOMEM;
a8488abe 1860
73208dfd
AC
1861 for (i = 0; i < ha->msix_count; i++)
1862 entries[i].entry = i;
a8488abe 1863
73208dfd 1864 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
a8488abe
AV
1865 if (ret) {
1866 qla_printk(KERN_WARNING, ha,
73208dfd
AC
1867 "MSI-X: Failed to enable support -- %d/%d\n"
1868 " Retry with %d vectors\n", ha->msix_count, ret, ret);
1869 ha->msix_count = ret;
1870 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1871 if (ret) {
1872 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1873 " support, giving up -- %d/%d\n",
1874 ha->msix_count, ret);
1875 goto msix_out;
1876 }
1877 ha->max_queues = ha->msix_count - 1;
1878 }
1879 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1880 ha->msix_count, GFP_KERNEL);
1881 if (!ha->msix_entries) {
1882 ret = -ENOMEM;
a8488abe
AV
1883 goto msix_out;
1884 }
1885 ha->flags.msix_enabled = 1;
1886
73208dfd
AC
1887 for (i = 0; i < ha->msix_count; i++) {
1888 qentry = &ha->msix_entries[i];
1889 qentry->vector = entries[i].vector;
1890 qentry->entry = entries[i].entry;
a8488abe 1891 qentry->have_irq = 0;
73208dfd 1892 qentry->rsp = NULL;
a8488abe
AV
1893 }
1894
73208dfd
AC
1895 /* Enable MSI-X for AENs for queue 0 */
1896 qentry = &ha->msix_entries[0];
1897 ret = request_irq(qentry->vector, base_queue.handler, 0,
1898 base_queue.name, rsp);
1899 if (ret) {
1900 qla_printk(KERN_WARNING, ha,
1901 "MSI-X: Unable to register handler -- %x/%d.\n",
1902 qentry->vector, ret);
1903 qla24xx_disable_msix(ha);
1904 goto msix_out;
1905 }
1906 qentry->have_irq = 1;
1907 qentry->rsp = rsp;
1908
1909 /* Enable MSI-X vector for response queue update for queue 0 */
1910 if (ha->max_queues > 1 && ha->mqiobase) {
1911 ha->mqenable = 1;
1912 msix_queue = &multi_rsp_queue;
1913 qla_printk(KERN_INFO, ha,
1914 "MQ enabled, Number of Queue Resources: %d \n",
1915 ha->max_queues);
1916 } else {
1917 ha->mqenable = 0;
1918 msix_queue = &base_rsp_queue;
1919 }
1920
1921 qentry = &ha->msix_entries[1];
1922 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1923 msix_queue->name, rsp);
1924 if (ret) {
1925 qla_printk(KERN_WARNING, ha,
1926 "MSI-X: Unable to register handler -- %x/%d.\n",
1927 qentry->vector, ret);
1928 qla24xx_disable_msix(ha);
1929 ha->mqenable = 0;
1930 goto msix_out;
1931 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1934
a8488abe 1935msix_out:
73208dfd 1936 kfree(entries);
a8488abe
AV
1937 return ret;
1938}
1939
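/*
 * qla2x00_request_irqs() selects the interrupt scheme: MSI-X on the
 * ISPs that support it (early ISP2432 silicon is excluded, and a few HP
 * subsystem IDs skip both MSI-X and MSI), otherwise MSI, otherwise a
 * shared INTx line.  For the MSI and INTx cases the legacy intr_handler
 * is installed on ha->pdev->irq, and on success any latched RISC/host
 * interrupt state is cleared before returning.
 */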
1940int
73208dfd 1941qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
a8488abe
AV
1942{
1943 int ret;
963b0fdd 1944 device_reg_t __iomem *reg = ha->iobase;
a8488abe
AV
1945
1946 /* If possible, enable MSI-X. */
4d4df193 1947 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
a8488abe
AV
1948 goto skip_msix;
1949
e315cd28
AC
1950 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1951 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
a8488abe 1952 DEBUG2(qla_printk(KERN_WARNING, ha,
e315cd28
AC
1953 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1954 ha->pdev->revision, ha->fw_attributes));
a8488abe
AV
1955
1956 goto skip_msix;
1957 }
1958
da7429f9
AV
1959 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1960 (ha->pdev->subsystem_device == 0x7040 ||
1961 ha->pdev->subsystem_device == 0x7041 ||
1962 ha->pdev->subsystem_device == 0x1705)) {
1963 DEBUG2(qla_printk(KERN_WARNING, ha,
1964 "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1965 ha->pdev->subsystem_vendor,
1966 ha->pdev->subsystem_device));
1967
1968 goto skip_msi;
1969 }
1970
73208dfd 1971 ret = qla24xx_enable_msix(ha, rsp);
a8488abe
AV
1972 if (!ret) {
1973 DEBUG2(qla_printk(KERN_INFO, ha,
1974 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1975 ha->fw_attributes));
963b0fdd 1976 goto clear_risc_ints;
a8488abe
AV
1977 }
1978 qla_printk(KERN_WARNING, ha,
1979 "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1980skip_msix:
cbedb601 1981
4d4df193 1982 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
cbedb601
AV
1983 goto skip_msi;
1984
1985 ret = pci_enable_msi(ha->pdev);
1986 if (!ret) {
1987 DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1988 ha->flags.msi_enabled = 1;
1989 }
1990skip_msi:
1991
fd34f556 1992 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
e315cd28 1993 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
963b0fdd 1994 if (ret) {
a8488abe
AV
1995 qla_printk(KERN_WARNING, ha,
1996 "Failed to reserve interrupt %d already in use.\n",
1997 ha->pdev->irq);
963b0fdd
AV
1998 goto fail;
1999 }
2000 ha->flags.inta_enabled = 1;
963b0fdd
AV
2001clear_risc_ints:
2002
c6952483 2003 spin_lock_irq(&ha->hardware_lock);
963b0fdd
AV
2004 if (IS_FWI2_CAPABLE(ha)) {
2005 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
2006 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
2007 } else {
2008 WRT_REG_WORD(&reg->isp.semaphore, 0);
2009 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
2010 WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
a8488abe 2011 }
c6952483 2012 spin_unlock_irq(&ha->hardware_lock);
a8488abe 2013
963b0fdd 2014fail:
a8488abe
AV
2015 return ret;
2016}
2017
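/*
 * qla2x00_free_irqs() undoes whichever scheme qla2x00_request_irqs()
 * set up: MSI-X vectors via qla24xx_disable_msix(), or the INTx/MSI
 * line registered against the base response queue, followed by
 * pci_disable_msi().
 */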
2018void
e315cd28 2019qla2x00_free_irqs(scsi_qla_host_t *vha)
a8488abe 2020{
e315cd28 2021 struct qla_hw_data *ha = vha->hw;
73208dfd 2022 struct rsp_que *rsp = ha->rsp_q_map[0];
a8488abe
AV
2023
2024 if (ha->flags.msix_enabled)
2025 qla24xx_disable_msix(ha);
cbedb601 2026 else if (ha->flags.inta_enabled) {
e315cd28 2027 free_irq(ha->pdev->irq, rsp);
cbedb601
AV
2028 pci_disable_msi(ha->pdev);
2029 }
a8488abe 2030}
e315cd28
AC
2031
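/*
 * qla2x00_get_rsp_host() maps a response queue back to the vha that
 * should process it.  For a non-base queue it inspects the status entry
 * at the current ring pointer and uses the vha of the matching
 * outstanding command; otherwise, or when no match is found, it falls
 * back to the physical host stored via pci_get_drvdata().
 */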
2032static struct scsi_qla_host *
2033qla2x00_get_rsp_host(struct rsp_que *rsp)
2034{
2035 srb_t *sp;
2036 struct qla_hw_data *ha = rsp->hw;
2037 struct scsi_qla_host *vha = NULL;
73208dfd
AC
2038 struct sts_entry_24xx *pkt;
2039 struct req_que *req;
2040
2041 if (rsp->id) {
2042 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2043 req = rsp->req;
2044 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2045 sp = req->outstanding_cmds[pkt->handle];
2046 if (sp)
2047 vha = sp->vha;
2048 }
e315cd28
AC
2049 }
2050 if (!vha)
73208dfd 2051 /* handle it in base queue */
e315cd28
AC
2052 vha = pci_get_drvdata(ha->pdev);
2053
2054 return vha;
2055}
73208dfd
AC
2056
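/*
 * qla25xx_request_irq() registers the multi_rsp_queue handler on the
 * MSI-X vector assigned to an additional response queue (presumably
 * created by the multiqueue setup code elsewhere in the driver) and
 * records the vector/queue association in the qla_msix_entry.
 */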
2057int qla25xx_request_irq(struct rsp_que *rsp)
2058{
2059 struct qla_hw_data *ha = rsp->hw;
2060 struct qla_init_msix_entry *intr = &multi_rsp_queue;
2061 struct qla_msix_entry *msix = rsp->msix;
2062 int ret;
2063
2064 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2065 if (ret) {
2066 qla_printk(KERN_WARNING, ha,
2067 "MSI-X: Unable to register handler -- %x/%d.\n",
2068 msix->vector, ret);
2069 return ret;
2070 }
2071 msix->have_irq = 1;
2072 msix->rsp = rsp;
2073 return ret;
2074}
2075
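/*
 * Per-ISP helpers for updating the response-queue out-pointer: the 25xx
 * variant addresses the queue's register page inside the mqiobase BAR
 * (QLA_QUE_PAGE * id), while the 24xx variant writes the single
 * rsp_q_out register in the base I/O space.  They are presumably hooked
 * up per chip by the queue-initialization code outside this file.
 */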
17d98630
AC
2076void
2077qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2078{
2079 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2080 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2081}
2082
2083void
2084qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2085{
2086 device_reg_t __iomem *reg = (void *) ha->iobase;
2087 WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2088}
2089