/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>
static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
    struct rsp_que *);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags = 0;

        /* Set transfer direction */
        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }
        return cflags;
}
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
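 *
 * For example, 17 descriptors need the Command Type 2 IOCB itself (room for
 * 3 DSDs) plus two Continuation Type 0 IOCBs (7 DSDs each), i.e. 3 entries.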
 */
qla2x00_calc_iocbs_32(uint16_t dsds)

        iocbs += (dsds - 3) / 7;
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
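 *
 * For example, 12 descriptors need the Command Type 3 IOCB itself (room for
 * 2 DSDs) plus two Continuation Type 1 IOCBs (5 DSDs each), i.e. 3 entries.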
 */
qla2x00_calc_iocbs_64(uint16_t dsds)

        iocbs += (dsds - 2) / 5;
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}
/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        que_id = vha->req_ques[0];
        req = vha->hw->req_q_map[que_id];

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
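
/*
 * Note: each DSD emitted by the routine above is a pair of little-endian
 * 32-bit words, the segment's DMA address followed by its length in bytes,
 * so this path can only describe buffers mapped below 4 GB.
 */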
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        que_id = vha->req_ques[0];
        req = vha->hw->req_q_map[que_id];

        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
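
/*
 * Note: the 64-bit variant above writes three little-endian words per
 * segment, the low and high halves of the DMA address (LSD/MSD) followed
 * by the length, so it can reference buffers anywhere in a 64-bit DMA
 * address space.
 */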
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        cmd_entry_t *cmd_pkt;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;

        /* Setup device pointers. */
        reg = &ha->iobase->isp;
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
                    != QLA_SUCCESS)
                        return (QLA_FUNCTION_FAILED);
                vha->marker_needed = 0;
        }
        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
        }
        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;
        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}
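
/*
 * Note: qla2x00_start_scsi() is not usually called directly; the SCSI
 * queuecommand path reaches it through the per-ISP method table, roughly
 * (illustrative, not verbatim):
 *
 *      rval = ha->isp_ops->start_scsi(sp);
 *
 * which is how 2xxx- and 24xx-class adapters select between this routine
 * and qla24xx_start_scsi() below.
 */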
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
        struct rsp_que *rsp, uint16_t loop_id,
        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, base_vha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }

        qla2x00_isp_cmd(vha, req);

        return (QLA_SUCCESS);
}
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
        struct rsp_que *rsp, uint16_t loop_id, uint16_t lun, uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
        return (ret);
}
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if the function failed, else a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        request_t *pkt = NULL;
        uint16_t req_cnt = 1;

        /* Wait 1 second for slot. */
        for (timer = HZ; timer; timer--) {
                if ((req_cnt + 2) >= req->cnt) {
                        /* Calculate number of free request entries. */
                        RD_REG_DWORD(&reg->isp25mq.req_q_out);
                        if (IS_FWI2_CAPABLE(ha))
                                cnt = (uint16_t)RD_REG_DWORD(
                                    &reg->isp24.req_q_out);
                        else
                                cnt = qla2x00_debounce_register(
                                    ISP_REQ_Q_OUT(ha, &reg->isp));
                        if (req->ring_index < cnt)
                                req->cnt = cnt - req->ring_index;
                        else
                                req->cnt = req->length -
                                    (req->ring_index - cnt);
                }
                /* If room for request in request ring. */
                if ((req_cnt + 2) < req->cnt) {
                        req->cnt--;
                        pkt = req->ring_ptr;

                        /* Zero out packet. */
                        dword_ptr = (uint32_t *)pkt;
                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                                *dword_ptr++ = 0;

                        /* Set system defined field. */
                        pkt->sys_define = (uint8_t)req->ring_index;

                        /* Set entry count. */
                        pkt->entry_count = 1;

                        break;
                }

                /* Release ring specific lock */
                spin_unlock_irq(&ha->hardware_lock);

                udelay(2);      /* 2 us */

                /* Check for pending interrupts. */
                /* During init we issue marker directly */
                if (!vha->marker_needed && vha->flags.init_done)
                        qla2x00_poll(rsp);

                spin_lock_irq(&ha->hardware_lock);
        }
        if (!pkt)
                DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));

        return (pkt);
}
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        RD_REG_DWORD(&reg->isp25mq.req_q_out);
        if (IS_FWI2_CAPABLE(ha)) {
                WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
        } else {
                WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                    req->ring_index);
                RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
        }
}
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
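 *
 * For example, 11 descriptors need the Command Type 7 IOCB itself (room for
 * 1 DSD) plus two Continuation Type 1 IOCBs (5 DSDs each), i.e. 3 entries.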
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)

        iocbs += (dsds - 1) / 5;
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCBs.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        que_id = vha->req_ques[0];
        req = vha->hw->req_q_map[que_id];

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        struct cmd_type_7 *cmd_pkt;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = sp->cmd;
        struct scsi_qla_host *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg;

        /* Setup device pointers. */
        que_id = vha->req_ques[0];
        req = ha->req_q_map[que_id];
        reg = ISP_QUE_REG(ha, req->id);
        rsp = ha->rsp_q_map[que_id];

        /* So we know we haven't pci_map'ed anything yet */
        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
                    != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }
        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
        }
        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
                RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;
        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = handle;

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vp_idx;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}
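
/*
 * Note on the ring-index writes above: the isp25mq block holds the
 * per-queue doorbell registers used when the ISP is configured with
 * multiple request queues, while the isp24 block is the base-queue
 * register set; the driver programs one or the other for a given
 * adapter configuration, not both.
 */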