]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/scsi/qla2xxx/qla_iocb.c
0c145c9e0cd9b0382290729a0a64b3dd958cdfec
[net-next-2.6.git] / drivers / scsi / qla2xxx / qla_iocb.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
11
12 #include <scsi/scsi_tcq.h>
13
14 static request_t *qla2x00_req_pkt(scsi_qla_host_t *);
15 static void qla2x00_isp_cmd(scsi_qla_host_t *);
16
17 /**
18  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
19  * @cmd: SCSI command
20  *
21  * Returns the proper CF_* direction based on CDB.
22  */
23 static inline uint16_t
24 qla2x00_get_cmd_direction(srb_t *sp)
25 {
26         uint16_t cflags;
27
28         cflags = 0;
29
30         /* Set transfer direction */
31         if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32                 cflags = CF_WRITE;
33                 sp->fcport->vha->hw->qla_stats.output_bytes +=
34                     scsi_bufflen(sp->cmd);
35         } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
36                 cflags = CF_READ;
37                 sp->fcport->vha->hw->qla_stats.input_bytes +=
38                     scsi_bufflen(sp->cmd);
39         }
40         return (cflags);
41 }
42
/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * The command IOCB carries 3 DSDs; every Continuation Type 0 IOCB
 * carries 7 more.
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs = 1;

	if (dsds > 3)
		iocbs += (dsds - 3 + 6) / 7;	/* round up */

	return iocbs;
}
64
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * The command IOCB carries 2 DSDs; every Continuation Type 1 IOCB
 * carries 5 more.
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs = 1;

	if (dsds > 2)
		iocbs += (dsds - 2 + 4) / 5;	/* round up */

	return iocbs;
}
86
87 /**
88  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89  * @ha: HA context
90  *
91  * Returns a pointer to the Continuation Type 0 IOCB packet.
92  */
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *vha)
95 {
96         cont_entry_t *cont_pkt;
97         struct req_que *req = vha->hw->req;
98         /* Adjust ring index. */
99         req->ring_index++;
100         if (req->ring_index == req->length) {
101                 req->ring_index = 0;
102                 req->ring_ptr = req->ring;
103         } else {
104                 req->ring_ptr++;
105         }
106
107         cont_pkt = (cont_entry_t *)req->ring_ptr;
108
109         /* Load packet defaults. */
110         *((uint32_t *)(&cont_pkt->entry_type)) =
111             __constant_cpu_to_le32(CONTINUE_TYPE);
112
113         return (cont_pkt);
114 }
115
116 /**
117  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
118  * @ha: HA context
119  *
120  * Returns a pointer to the continuation type 1 IOCB packet.
121  */
122 static inline cont_a64_entry_t *
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
124 {
125         cont_a64_entry_t *cont_pkt;
126         struct req_que *req = vha->hw->req;
127
128         /* Adjust ring index. */
129         req->ring_index++;
130         if (req->ring_index == req->length) {
131                 req->ring_index = 0;
132                 req->ring_ptr = req->ring;
133         } else {
134                 req->ring_ptr++;
135         }
136
137         cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
138
139         /* Load packet defaults. */
140         *((uint32_t *)(&cont_pkt->entry_type)) =
141             __constant_cpu_to_le32(CONTINUE_A64_TYPE);
142
143         return (cont_pkt);
144 }
145
146 /**
147  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
148  * capable IOCB types.
149  *
150  * @sp: SRB command to process
151  * @cmd_pkt: Command type 2 IOCB
152  * @tot_dsds: Total number of segments to transfer
153  */
154 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
155     uint16_t tot_dsds)
156 {
157         uint16_t        avail_dsds;
158         uint32_t        *cur_dsd;
159         scsi_qla_host_t *vha;
160         struct scsi_cmnd *cmd;
161         struct scatterlist *sg;
162         int i;
163
164         cmd = sp->cmd;
165
166         /* Update entry type to indicate Command Type 2 IOCB */
167         *((uint32_t *)(&cmd_pkt->entry_type)) =
168             __constant_cpu_to_le32(COMMAND_TYPE);
169
170         /* No data transfer */
171         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
172                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
173                 return;
174         }
175
176         vha = sp->vha;
177
178         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
179
180         /* Three DSDs are available in the Command Type 2 IOCB */
181         avail_dsds = 3;
182         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
183
184         /* Load data segments */
185         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
186                 cont_entry_t *cont_pkt;
187
188                 /* Allocate additional continuation packets? */
189                 if (avail_dsds == 0) {
190                         /*
191                          * Seven DSDs are available in the Continuation
192                          * Type 0 IOCB.
193                          */
194                         cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
195                         cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
196                         avail_dsds = 7;
197                 }
198
199                 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
200                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
201                 avail_dsds--;
202         }
203 }
204
205 /**
206  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
207  * capable IOCB types.
208  *
209  * @sp: SRB command to process
210  * @cmd_pkt: Command type 3 IOCB
211  * @tot_dsds: Total number of segments to transfer
212  */
213 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
214     uint16_t tot_dsds)
215 {
216         uint16_t        avail_dsds;
217         uint32_t        *cur_dsd;
218         scsi_qla_host_t *vha;
219         struct scsi_cmnd *cmd;
220         struct scatterlist *sg;
221         int i;
222
223         cmd = sp->cmd;
224
225         /* Update entry type to indicate Command Type 3 IOCB */
226         *((uint32_t *)(&cmd_pkt->entry_type)) =
227             __constant_cpu_to_le32(COMMAND_A64_TYPE);
228
229         /* No data transfer */
230         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
231                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
232                 return;
233         }
234
235         vha = sp->vha;
236
237         cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
238
239         /* Two DSDs are available in the Command Type 3 IOCB */
240         avail_dsds = 2;
241         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
242
243         /* Load data segments */
244         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
245                 dma_addr_t      sle_dma;
246                 cont_a64_entry_t *cont_pkt;
247
248                 /* Allocate additional continuation packets? */
249                 if (avail_dsds == 0) {
250                         /*
251                          * Five DSDs are available in the Continuation
252                          * Type 1 IOCB.
253                          */
254                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
255                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
256                         avail_dsds = 5;
257                 }
258
259                 sle_dma = sg_dma_address(sg);
260                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
261                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
262                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
263                 avail_dsds--;
264         }
265 }
266
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Claims an outstanding-command handle, DMA-maps the scatter/gather
 * list, builds a command IOCB (plus continuations) on the request ring
 * and writes the new ring index to the chip.  Runs under
 * ha->hardware_lock.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list.  Handle 0 is never
	 * used: the scan starts after the last-used slot and wraps from
	 * MAX_OUTSTANDING_COMMANDS back to 1. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed.  When the cached
	 * free count looks too small, refresh it from the chip's out
	 * pointer before giving up; +2 keeps a safety margin on the ring. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->vha = vha;
	/* Stash the handle so the completion path can find this sp. */
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet (everything after the
	 * 4-byte header and 4-byte handle). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Make the IOCB contents visible before the doorbell write. */
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	/* tot_dsds != 0 means the sg list was mapped above — undo it. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
409
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier (MK_SYNC_* value)
 *
 * Can be called from both normal and interrupt context.
 * Note: the caller must hold ha->hardware_lock; qla2x00_req_pkt() may
 * temporarily drop and re-acquire it while waiting for ring space.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	/* Grab a zeroed request-ring entry (may busy-wait for a slot). */
	mrk = (mrk_entry_t *)qla2x00_req_pkt(base_vha);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	/* MK_SYNC_ALL needs no target/LUN qualification. */
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			/* 24xx layout: nport handle plus FCP-format
			 * (byte-swapped) LUN and VP index. */
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	/* Make the IOCB contents visible before ringing the doorbell. */
	wmb();

	qla2x00_isp_cmd(base_vha);

	return (QLA_SUCCESS);
}
460
461 int
462 qla2x00_marker(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t lun,
463     uint8_t type)
464 {
465         int ret;
466         unsigned long flags = 0;
467         struct qla_hw_data *ha = vha->hw;
468
469         spin_lock_irqsave(&ha->hardware_lock, flags);
470         ret = __qla2x00_marker(vha, loop_id, lun, type);
471         spin_unlock_irqrestore(&ha->hardware_lock, flags);
472
473         return (ret);
474 }
475
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 * The lock is dropped and re-acquired inside the wait loop while the ring
 * is full.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ha->iobase;
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;
	struct req_que *req = ha->req;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries from the
			 * chip's out pointer (register read differs between
			 * FWI2 and legacy adapters). */
			if (IS_FWI2_CAPABLE(ha))
				cnt = (uint16_t)RD_REG_DWORD(
				    &reg->isp24.req_q_out);
			else
				cnt = qla2x00_debounce_register(
				    ISP_REQ_Q_OUT(ha, &reg->isp));
			if  (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)req->ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock so interrupts can progress. */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		/* NOTE(review): polling only when marker_needed is clear AND
		 * init is NOT done looks inconsistent with the comment
		 * above — confirm the intended condition. */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(ha->rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
548
549 /**
550  * qla2x00_isp_cmd() - Modify the request ring pointer.
551  * @ha: HA context
552  *
553  * Note: The caller must hold the hardware lock before calling this routine.
554  */
555 static void
556 qla2x00_isp_cmd(scsi_qla_host_t *vha)
557 {
558         struct qla_hw_data *ha = vha->hw;
559         device_reg_t __iomem *reg = ha->iobase;
560         struct req_que *req = ha->req;
561
562         DEBUG5(printk("%s(): IOCB data:\n", __func__));
563         DEBUG5(qla2x00_dump_buffer(
564             (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
565
566         /* Adjust ring index. */
567         req->ring_index++;
568         if (req->ring_index == req->length) {
569                 req->ring_index = 0;
570                 req->ring_ptr = req->ring;
571         } else
572                 req->ring_ptr++;
573
574         /* Set chip new ring index. */
575         if (IS_FWI2_CAPABLE(ha)) {
576                 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
577                 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
578         } else {
579                 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
580                 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
581         }
582
583 }
584
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * The command IOCB carries 1 DSD; every Continuation Type 1 IOCB
 * carries 5 more.
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs = 1;

	if (dsds > 1)
		iocbs += (dsds - 1 + 4) / 5;	/* round up */

	return iocbs;
}
606
607 /**
608  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
609  * IOCB types.
610  *
611  * @sp: SRB command to process
612  * @cmd_pkt: Command type 3 IOCB
613  * @tot_dsds: Total number of segments to transfer
614  */
615 static inline void
616 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
617     uint16_t tot_dsds)
618 {
619         uint16_t        avail_dsds;
620         uint32_t        *cur_dsd;
621         scsi_qla_host_t *vha;
622         struct scsi_cmnd *cmd;
623         struct scatterlist *sg;
624         int i;
625
626         cmd = sp->cmd;
627
628         /* Update entry type to indicate Command Type 3 IOCB */
629         *((uint32_t *)(&cmd_pkt->entry_type)) =
630             __constant_cpu_to_le32(COMMAND_TYPE_7);
631
632         /* No data transfer */
633         if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
634                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
635                 return;
636         }
637
638         vha = sp->vha;
639
640         /* Set transfer direction */
641         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
642                 cmd_pkt->task_mgmt_flags =
643                     __constant_cpu_to_le16(TMF_WRITE_DATA);
644                 sp->fcport->vha->hw->qla_stats.output_bytes +=
645                     scsi_bufflen(sp->cmd);
646         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
647                 cmd_pkt->task_mgmt_flags =
648                     __constant_cpu_to_le16(TMF_READ_DATA);
649                 sp->fcport->vha->hw->qla_stats.input_bytes +=
650                     scsi_bufflen(sp->cmd);
651         }
652
653         /* One DSD is available in the Command Type 3 IOCB */
654         avail_dsds = 1;
655         cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
656
657         /* Load data segments */
658
659         scsi_for_each_sg(cmd, sg, tot_dsds, i) {
660                 dma_addr_t      sle_dma;
661                 cont_a64_entry_t *cont_pkt;
662
663                 /* Allocate additional continuation packets? */
664                 if (avail_dsds == 0) {
665                         /*
666                          * Five DSDs are available in the Continuation
667                          * Type 1 IOCB.
668                          */
669                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
670                         cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
671                         avail_dsds = 5;
672                 }
673
674                 sle_dma = sg_dma_address(sg);
675                 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
676                 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
677                 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
678                 avail_dsds--;
679         }
680 }
681
682
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Claims an outstanding-command handle, DMA-maps the scatter/gather
 * list, builds a Command Type 7 IOCB (plus Continuation Type 1 IOCBs)
 * on the request ring and writes the new ring index to the chip.
 * Runs under ha->hardware_lock.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_24xx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp24;
	cmd = sp->cmd;
	req = ha->req;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list.  Handle 0 is never
	 * used: the scan starts after the last-used slot and wraps from
	 * MAX_OUTSTANDING_COMMANDS back to 1. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed.  When the cached
	 * free count looks too small, refresh it from the chip's out
	 * pointer before giving up; +2 keeps a safety margin on the ring. */
	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->vha = vha;
	/* Stash the handle so the completion path can find this sp. */
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet (everything after the
	 * 4-byte header and 4-byte handle). */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* LUN and CDB are carried in FCP (byte-swapped) order. */
	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Make the IOCB contents visible before the doorbell write. */
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(&reg->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    ha->rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	/* tot_dsds != 0 means the sg list was mapped above — undo it. */
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}