/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
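
/*
 * Worked example (illustrative): the Command Type 2 IOCB carries 3 DSDs and
 * each Continuation Type 0 IOCB carries 7 more, so dsds = 17 yields
 * 1 + (14 / 7) = 3 entries, while dsds = 18 leaves a remainder and rounds
 * up to 4.
 */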

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
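
/*
 * Worked example (illustrative): the Command Type 3 IOCB carries 2 DSDs and
 * each Continuation Type 1 IOCB carries 5 more, so dsds = 12 yields
 * 1 + (10 / 5) = 3 entries, while dsds = 13 rounds up to 4.
 */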

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}
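
/*
 * Note: both continuation-IOCB helpers advance the request-ring producer the
 * same way -- bump ring_index and, once it reaches req->length, wrap the
 * index and ring_ptr back to the start of the ring. Callers are expected to
 * have reserved ring space beforehand (see the req->cnt checks in the
 * *_start_scsi() routines).
 */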

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
        cont_a64_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

        /* We only support T10 DIF right now */
        if (guard != SHOST_DIX_GUARD_CRC) {
                DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
                return 0;
        }

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(sp->cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        case SCSI_PROT_WRITE_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(sp->cmd);
}
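
/*
 * Summary of the mapping above: host-side STRIP operations become firmware
 * PO_MODE_DIF_REMOVE, INSERT operations become PO_MODE_DIF_INSERT, and PASS
 * (or anything else) forwards the protection data unchanged via
 * PO_MODE_DIF_PASS. The return value is the number of protection
 * scatter/gather entries the caller still has to DMA-map.
 */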

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}
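
/*
 * Layout note: a 32-bit DSD is two words (address, length), while the
 * 64-bit variant built below takes three words (address low, address high,
 * length) -- which is why the 64-bit command and continuation IOCBs hold
 * fewer DSDs per entry.
 */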

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
                                                        != QLA_SUCCESS)
                        return (QLA_FUNCTION_FAILED);
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
        if (mrk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                    __func__, base_vha->host_no));

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

        DEBUG5(printk("%s(): IOCB data:\n", __func__));
        DEBUG5(qla2x00_dump_buffer(
            (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        if (IS_QLA82XX(ha)) {
                uint32_t dbval = 0x04 | (ha->portnum << 5);

                /* write, read and verify logic */
                dbval = dbval | (req->id << 8) | (req->ring_index << 16);
                if (ql2xdbwr)
                        qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
                else {
                        WRT_REG_DWORD(
                                (unsigned long __iomem *)ha->nxdb_wr_ptr,
                                dbval);
                        wmb();
                        while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
                                WRT_REG_DWORD((unsigned long __iomem *)
                                        ha->nxdb_wr_ptr, dbval);
                                wmb();
                        }
                }
        } else if (ha->mqenable) {
                /* Set chip new ring index. */
                WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
                RD_REG_DWORD(&ioreg->hccr);
        } else {
                if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
            __func__, iocbs));
        return iocbs;
}
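
/*
 * Worked example (illustrative): the command IOCB here holds a single DSD
 * and each Continuation Type 1 IOCB holds 5 more, so dsds = 11 yields
 * 1 + (10 / 5) = 3 packets, while dsds = 12 rounds up to 4.
 */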

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};
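
/*
 * The fields above mirror the tags of the 8-byte T10 DIF tuple that
 * accompanies each logical block (16-bit guard CRC, 16-bit application tag,
 * 32-bit reference tag); the mask bytes tell the firmware which tag bytes
 * to validate or replace.
 */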

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct sd_dif_tuple *spt;
        unsigned char op = scsi_get_prot_op(cmd);

        switch (scsi_get_prot_type(cmd)) {
        /* For TYPE 0 protection: no checking */
        case SCSI_PROT_DIF_TYPE0:
                pkt->ref_tag_mask[0] = 0x00;
                pkt->ref_tag_mask[1] = 0x00;
                pkt->ref_tag_mask[2] = 0x00;
                pkt->ref_tag_mask[3] = 0x00;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                if (!ql2xenablehba_err_chk)
                        break;

                if (scsi_prot_sg_count(cmd)) {
                        spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
                            scsi_prot_sglist(cmd)[0].offset;
                        pkt->app_tag = swab32(spt->app_tag);
                        pkt->app_tag_mask[0] =  0xff;
                        pkt->app_tag_mask[1] =  0xff;
                }

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For TYPE 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For TYPE 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                if (!ql2xenablehba_err_chk)
                        break;

                if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
                    op == SCSI_PROT_WRITE_PASS)) {
                        spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
                            scsi_prot_sglist(cmd)[0].offset;
                        DEBUG18(printk(KERN_DEBUG
                            "%s(): LBA from user %p, lba = 0x%x\n",
                            __func__, spt, (int)spt->ref_tag));
                        pkt->ref_tag = swab32(spt->ref_tag);
                        pkt->app_tag_mask[0] = 0x0;
                        pkt->app_tag_mask[1] = 0x0;
                } else {
                        pkt->ref_tag = cpu_to_le32((uint32_t)
                            (0xffffffff & scsi_get_lba(cmd)));
                        pkt->app_tag = __constant_cpu_to_le16(0);
                        pkt->app_tag_mask[0] = 0x0;
                        pkt->app_tag_mask[1] = 0x0;
                }
                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }

        DEBUG18(printk(KERN_DEBUG
            "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
            " app tag = 0x%x, prot SG count %d, cmd lba 0x%x,"
            " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
            (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        uint8_t         *cp;

        scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
                    " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
                    MSD(sle_dma), sg_dma_len(sg)));
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        DEBUG18(printk("%s(): User Data buffer = %p:\n",
                            __func__ , cp));
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}
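
/*
 * DSD-list format shared by both walk-and-build helpers: each entry is
 * 12 bytes (address low, address high, length); dsd_list_len reserves one
 * extra slot so a full list can chain to the next one, and the final list
 * is terminated with a zeroed DSD.
 */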

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
        uint8_t         *cp;

        cmd = sp->cmd;
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        DEBUG18(printk(KERN_DEBUG
                            "%s(): %p, sg entry %d - addr =0x%x"
                            " 0x%x, len =%d\n", __func__ , cur_dsd, i,
                            LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
                            __func__ , cp));
                }
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection operation options for the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t                *cur_dsd, *fcp_dl;
        scsi_qla_host_t         *vha;
        struct scsi_cmnd        *cmd;
        struct scatterlist      *cur_seg;
        int                     sgc;
        uint32_t                total_bytes;
        uint32_t                data_bytes;
        uint32_t                dif_bytes;
        uint8_t                 bundling = 1;
        uint16_t                blk_size;
        uint8_t                 *clr_ptr;
        struct crc_context      *crc_ctx_pkt = NULL;
        struct qla_hw_data      *ha;
        uint8_t                 additional_fcpcdb_len;
        uint16_t                fcp_cmnd_len;
        struct fcp_cmnd         *fcp_cmnd;
        dma_addr_t              crc_ctx_dma;

        cmd = sp->cmd;

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
                    __func__, data_bytes));
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        DEBUG18(printk(KERN_DEBUG
            "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
            vha->host_no, sp, scsi_get_prot_op(sp->cmd)));

        cmd_pkt->vp_index = sp->fcport->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        tot_prot_dsds = scsi_prot_sg_count(cmd);
        if (!tot_prot_dsds)
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
            GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
                    __func__));
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }
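
        /*
         * fcp_cmnd_len arithmetic: 12 bytes of fixed FCP_CMND header (the
         * 8-byte LUN plus the task/control bytes, including
         * additional_cdb_len), then the CDB itself (16 bytes, or a 4-byte
         * multiple when longer), then the trailing 4-byte FCP_DL field
         * that is patched in below through *fcp_dl.
         */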

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_attribute = 0;
        fcp_cmnd->task_management = 0;

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
            " entries %d, data bytes %d, Protection entries %d\n",
            __func__, vha->host_no, tot_dsds, (tot_dsds - tot_prot_dsds),
            data_bytes, tot_prot_dsds));

        /* Compute dif len and adjust data len to include protection */
        total_bytes = data_bytes;
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
                dif_bytes = (data_bytes / blk_size) * 8;
                total_bytes += dif_bytes;
        }
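
        /*
         * Illustrative example: with 512-byte sectors, a 4 KB transfer
         * spans 8 blocks, so 8 * 8 = 64 DIF bytes are added and the fibre
         * channel byte count becomes 4096 + 64 = 4160.
         */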

        if (!ql2xenablehba_err_chk)
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure bundling if we need to fetch interleaving
                 * protection PCI accesses.
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                                                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
            " = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
            vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
            crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
                    __func__, data_bytes));
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        /* Walk data segments */
        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
        if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walk dif segments */
                cur_seg = scsi_prot_sglist(cmd);
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        DEBUG18(qla_printk(KERN_INFO, ha,
            "CMD sent FAILED crc_q error: sp = %p\n", sp));
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        struct cmd_type_7 *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct scsi_cmnd *cmd = sp->cmd;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;

        /* Setup device pointers. */
        ret = 0;

        qla25xx_set_que(sp, &rsp);
        req = vha->req;

        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
                                                        != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        req_cnt = qla24xx_calc_iocbs(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                                (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet. */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
        cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

        /* Zero out remaining portion of packet. */
        /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set NPORT-ID and LUN number */
        cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
        cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
        cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
        cmd_pkt->vp_index = sp->fcport->vp_idx;

        int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
        host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
        host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        /* Specify response queue number where completion should happen */
        cmd_pkt->entry_status = (uint8_t) rsp->id;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla24xx_process_response_queue(vha, rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return QLA_FUNCTION_FAILED;
}
1307
1308
1309 /**
1310  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1311  * @sp: command to send to the ISP
1312  *
1313  * Returns non-zero if a failure occurred, else zero.
1314  */
1315 int
1316 qla24xx_dif_start_scsi(srb_t *sp)
1317 {
1318         int                     nseg;
1319         unsigned long           flags;
1320         uint32_t                *clr_ptr;
1321         uint32_t                index;
1322         uint32_t                handle;
1323         uint16_t                cnt;
1324         uint16_t                req_cnt = 0;
1325         uint16_t                tot_dsds;
1326         uint16_t                tot_prot_dsds;
1327         uint16_t                fw_prot_opts = 0;
1328         struct req_que          *req = NULL;
1329         struct rsp_que          *rsp = NULL;
1330         struct scsi_cmnd        *cmd = sp->cmd;
1331         struct scsi_qla_host    *vha = sp->fcport->vha;
1332         struct qla_hw_data      *ha = vha->hw;
1333         struct cmd_type_crc_2   *cmd_pkt;
1334         uint32_t                status = 0;
1335
1336 #define QDSS_GOT_Q_SPACE        BIT_0
1337
1338         /* Only process protection or >16 cdb in this routine */
1339         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1340                 if (cmd->cmd_len <= 16)
1341                         return qla24xx_start_scsi(sp);
1342         }
1343
1344         /* Setup device pointers. */
1345
1346         qla25xx_set_que(sp, &rsp);
1347         req = vha->req;
1348
1349         /* So we know we haven't pci_map'ed anything yet */
1350         tot_dsds = 0;
1351
1352         /* Send marker if required */
1353         if (vha->marker_needed != 0) {
1354                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1355                     QLA_SUCCESS)
1356                         return QLA_FUNCTION_FAILED;
1357                 vha->marker_needed = 0;
1358         }
1359
1360         /* Acquire ring specific lock */
1361         spin_lock_irqsave(&ha->hardware_lock, flags);
1362
1363         /* Check for room in outstanding command list. */
1364         handle = req->current_outstanding_cmd;
1365         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1366                 handle++;
1367                 if (handle == MAX_OUTSTANDING_COMMANDS)
1368                         handle = 1;
1369                 if (!req->outstanding_cmds[handle])
1370                         break;
1371         }
1372
1373         if (index == MAX_OUTSTANDING_COMMANDS)
1374                 goto queuing_error;
1375
1376         /* Compute number of required data segments */
1377         /* Map the sg table so we have an accurate count of sg entries needed */
1378         if (scsi_sg_count(cmd)) {
1379                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1380                     scsi_sg_count(cmd), cmd->sc_data_direction);
1381                 if (unlikely(!nseg))
1382                         goto queuing_error;
1383                 else
1384                         sp->flags |= SRB_DMA_VALID;
1385         } else
1386                 nseg = 0;
1387
1388         /* number of required data segments */
1389         tot_dsds = nseg;
1390
1391         /* Compute number of required protection segments */
1392         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1393                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1394                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1395                 if (unlikely(!nseg))
1396                         goto queuing_error;
1397                 else
1398                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1399         } else {
1400                 nseg = 0;
1401         }
1402
1403         req_cnt = 1;
1404         /* Total Data and protection sg segment(s) */
1405         tot_prot_dsds = nseg;
1406         tot_dsds += nseg;
1407         if (req->cnt < (req_cnt + 2)) {
1408                 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1409
1410                 if (req->ring_index < cnt)
1411                         req->cnt = cnt - req->ring_index;
1412                 else
1413                         req->cnt = req->length -
1414                                 (req->ring_index - cnt);
1415         }
1416
1417         if (req->cnt < (req_cnt + 2))
1418                 goto queuing_error;
1419
1420         status |= QDSS_GOT_Q_SPACE;
1421
1422         /* Build header part of command packet (excluding the OPCODE). */
1423         req->current_outstanding_cmd = handle;
1424         req->outstanding_cmds[handle] = sp;
1425         sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1426         req->cnt -= req_cnt;
1427
1428         /* Fill-in common area */
1429         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1430         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1431
1432         clr_ptr = (uint32_t *)cmd_pkt + 2;
1433         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1434
1435         /* Set NPORT-ID and LUN number*/
1436         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1437         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1438         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1439         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1440
1441         int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1442         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1443
1444         /* Total Data and protection segment(s) */
1445         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1446
1447         /* Build IOCB segments and adjust for data protection segments */
1448         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1449             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1450                 QLA_SUCCESS)
1451                 goto queuing_error;
1452
1453         cmd_pkt->entry_count = (uint8_t)req_cnt;
1454         /* Specify response queue number where completion should happen */
1455         cmd_pkt->entry_status = (uint8_t) rsp->id;
1456         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1457         wmb();
1458
1459         /* Adjust ring index. */
1460         req->ring_index++;
1461         if (req->ring_index == req->length) {
1462                 req->ring_index = 0;
1463                 req->ring_ptr = req->ring;
1464         } else
1465                 req->ring_ptr++;
1466
1467         /* Set chip new ring index. */
1468         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1469         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
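        /* The register read-back flushes the posted doorbell write to the
         * chip before the lock is dropped. */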
1470
1471         /* Manage unprocessed RIO/ZIO commands in response queue. */
1472         if (vha->flags.process_response_queue &&
1473             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1474                 qla24xx_process_response_queue(vha, rsp);
1475
1476         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1477
1478         return QLA_SUCCESS;
1479
1480 queuing_error:
1481         if (status & QDSS_GOT_Q_SPACE) {
1482                 req->outstanding_cmds[handle] = NULL;
1483                 req->cnt += req_cnt;
1484         }
1485         /* Cleanup will be performed by the caller (queuecommand) */
1486
1487         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1488
1489         DEBUG18(qla_printk(KERN_INFO, ha,
1490             "CMD send failed, SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
1491         return QLA_FUNCTION_FAILED;
1492 }
1493
1494
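/**
 * qla25xx_set_que() - Select the response queue for a command.
 * @sp: SCSI request block
 * @rsp: response queue chosen for @sp's completion
 *
 * With CPU affinity enabled, the CPU that issued the request picks the
 * queue: CPU n maps to response queue n + 1, queue 0 being the default
 * queue.  Out-of-range CPU numbers fall back to queue 0.
 */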
1495 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1496 {
1497         struct scsi_cmnd *cmd = sp->cmd;
1498         struct qla_hw_data *ha = sp->fcport->vha->hw;
1499         int affinity = cmd->request->cpu;
1500
1501         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1502                 affinity < ha->max_rsp_queues - 1)
1503                 *rsp = ha->rsp_q_map[affinity + 1];
1504         else
1505                 *rsp = ha->rsp_q_map[0];
1506 }
1507
1508 /* Generic Control-SRB manipulation functions. */
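/**
 * qla2x00_alloc_iocbs() - Reserve and initialize a request-queue entry.
 * @vha: HA context
 * @sp: SRB to bind to the entry, or NULL when no outstanding-command
 *      handle is needed
 *
 * Returns a zeroed request packet with handle and entry_count filled in,
 * or NULL when either the outstanding-command array or the request ring
 * is full.
 */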
1509 void *
1510 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1511 {
1512         struct qla_hw_data *ha = vha->hw;
1513         struct req_que *req = ha->req_q_map[0];
1514         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1515         uint32_t index, handle;
1516         request_t *pkt;
1517         uint16_t cnt, req_cnt;
1518
1519         pkt = NULL;
1520         req_cnt = 1;
1521         handle = 0;
1522
1523         if (!sp)
1524                 goto skip_cmd_array;
1525
1526         /* Check for room in outstanding command list. */
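        /*
         * Scan circularly, starting just past the last handle issued.
         * Handles stay within [1, MAX_OUTSTANDING_COMMANDS), so a zero
         * handle can double as "no command pending".
         */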
1527         handle = req->current_outstanding_cmd;
1528         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1529                 handle++;
1530                 if (handle == MAX_OUTSTANDING_COMMANDS)
1531                         handle = 1;
1532                 if (!req->outstanding_cmds[handle])
1533                         break;
1534         }
1535         if (index == MAX_OUTSTANDING_COMMANDS)
1536                 goto queuing_error;
1537
1538         /* Prep command array. */
1539         req->current_outstanding_cmd = handle;
1540         req->outstanding_cmds[handle] = sp;
1541         sp->handle = handle;
1542
1543 skip_cmd_array:
1544         /* Check for room on request queue. */
1545         if (req->cnt < req_cnt) {
1546                 if (ha->mqenable)
1547                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1548                 else if (IS_QLA82XX(ha))
1549                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1550                 else if (IS_FWI2_CAPABLE(ha))
1551                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1552                 else
1553                         cnt = qla2x00_debounce_register(
1554                             ISP_REQ_Q_OUT(ha, &reg->isp));
1555
1556                 if (req->ring_index < cnt)
1557                         req->cnt = cnt - req->ring_index;
1558                 else
1559                         req->cnt = req->length -
1560                             (req->ring_index - cnt);
1561         }
1562         if (req->cnt < req_cnt)
1563                 goto queuing_error;
1564
1565         /* Prep packet */
1566         req->cnt -= req_cnt;
1567         pkt = req->ring_ptr;
1568         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1569         pkt->entry_count = req_cnt;
1570         pkt->handle = handle;
1571
1572 queuing_error:
1573         return pkt;
1574 }
1575
1576 static void
1577 qla2x00_start_iocbs(srb_t *sp)
1578 {
1579         struct qla_hw_data *ha = sp->fcport->vha->hw;
1580         struct req_que *req = ha->req_q_map[0];
1581         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1582         struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1583
1584         if (IS_QLA82XX(ha)) {
1585                 qla82xx_start_iocbs(sp);
1586         } else {
1587                 /* Adjust ring index. */
1588                 req->ring_index++;
1589                 if (req->ring_index == req->length) {
1590                         req->ring_index = 0;
1591                         req->ring_ptr = req->ring;
1592                 } else
1593                         req->ring_ptr++;
1594
1595                 /* Set chip new ring index. */
1596                 if (ha->mqenable) {
1597                         WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1598                         RD_REG_DWORD(&ioreg->hccr);
1601                 } else if (IS_FWI2_CAPABLE(ha)) {
1602                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1603                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1604                 } else {
1605                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1606                                 req->ring_index);
1607                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1608                 }
1609         }
1610 }
1611
1612 static void
1613 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1614 {
1615         struct srb_ctx *ctx = sp->ctx;
1616         struct srb_iocb *lio = ctx->u.iocb_cmd;
1617
1618         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1619         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1620         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1621                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1622         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1623                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1624         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1625         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1626         logio->port_id[1] = sp->fcport->d_id.b.area;
1627         logio->port_id[2] = sp->fcport->d_id.b.domain;
1628         logio->vp_index = sp->fcport->vp_idx;
1629 }
1630
1631 static void
1632 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1633 {
1634         struct qla_hw_data *ha = sp->fcport->vha->hw;
1635         struct srb_ctx *ctx = sp->ctx;
1636         struct srb_iocb *lio = ctx->u.iocb_cmd;
1637         uint16_t opts;
1638
1639         mbx->entry_type = MBX_IOCB_TYPE;
1640         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1641         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1642         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1643         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
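        /*
         * With extended IDs the loop ID and option bits go in separate
         * mailboxes; otherwise they share mb1, loop ID in the high byte
         * and options in the low byte, e.g. loop_id 0x23 with BIT_0 set
         * gives mb1 = 0x2301.
         */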
1644         if (HAS_EXTENDED_IDS(ha)) {
1645                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1646                 mbx->mb10 = cpu_to_le16(opts);
1647         } else {
1648                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1649         }
1650         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1651         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1652             sp->fcport->d_id.b.al_pa);
1653         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1654 }
1655
1656 static void
1657 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1658 {
1659         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1660         logio->control_flags =
1661             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1662         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1663         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1664         logio->port_id[1] = sp->fcport->d_id.b.area;
1665         logio->port_id[2] = sp->fcport->d_id.b.domain;
1666         logio->vp_index = sp->fcport->vp_idx;
1667 }
1668
1669 static void
1670 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1671 {
1672         struct qla_hw_data *ha = sp->fcport->vha->hw;
1673
1674         mbx->entry_type = MBX_IOCB_TYPE;
1675         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1676         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1677         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1678             cpu_to_le16(sp->fcport->loop_id):
1679             cpu_to_le16(sp->fcport->loop_id << 8);
1680         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1681         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1682             sp->fcport->d_id.b.al_pa);
1683         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1684         /* Implicit: mbx->mb10 = 0. */
1685 }
1686
1687 static void
1688 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1689 {
1690         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1691         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1692         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1693         logio->vp_index = sp->fcport->vp_idx;
1694 }
1695
1696 static void
1697 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1698 {
1699         struct qla_hw_data *ha = sp->fcport->vha->hw;
1700
1701         mbx->entry_type = MBX_IOCB_TYPE;
1702         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1703         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1704         if (HAS_EXTENDED_IDS(ha)) {
1705                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1706                 mbx->mb10 = cpu_to_le16(BIT_0);
1707         } else {
1708                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1709         }
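        /*
         * Spread the 64-bit port-database DMA address across mailboxes:
         * mb2/mb3 carry the high and low words of the lower dword,
         * mb6/mb7 those of the upper dword.
         */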
1710         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1711         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1712         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1713         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1714         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1715 }
1716
1717 static void
1718 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1719 {
1720         uint32_t flags;
1721         unsigned int lun;
1722         struct fc_port *fcport = sp->fcport;
1723         scsi_qla_host_t *vha = fcport->vha;
1724         struct qla_hw_data *ha = vha->hw;
1725         struct srb_ctx *ctx = sp->ctx;
1726         struct srb_iocb *iocb = ctx->u.iocb_cmd;
1727         struct req_que *req = vha->req;
1728
1729         flags = iocb->u.tmf.flags;
1730         lun = iocb->u.tmf.lun;
1731
1732         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1733         tsk->entry_count = 1;
1734         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1735         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1736         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
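        /* The firmware timeout is 2 * R_A_TOV in seconds; r_a_tov appears
         * to be kept in 100 ms units, hence the divide by 10. */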
1737         tsk->control_flags = cpu_to_le32(flags);
1738         tsk->port_id[0] = fcport->d_id.b.al_pa;
1739         tsk->port_id[1] = fcport->d_id.b.area;
1740         tsk->port_id[2] = fcport->d_id.b.domain;
1741         tsk->vp_index = fcport->vp_idx;
1742
1743         if (flags == TCF_LUN_RESET) {
1744                 int_to_scsilun(lun, &tsk->lun);
1745                 host_to_fcp_swap((uint8_t *)&tsk->lun,
1746                         sizeof(tsk->lun));
1747         }
1748 }
1749
1750 static void
1751 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1752 {
1753         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1754
1755         els_iocb->entry_type = ELS_IOCB_TYPE;
1756         els_iocb->entry_count = 1;
1757         els_iocb->sys_define = 0;
1758         els_iocb->entry_status = 0;
1759         els_iocb->handle = sp->handle;
1760         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1761         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
1762         els_iocb->vp_index = sp->fcport->vp_idx;
1763         els_iocb->sof_type = EST_SOFI3;
1764         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1765
1766         els_iocb->opcode =
1767             (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
1768             bsg_job->request->rqst_data.r_els.els_code :
1769             bsg_job->request->rqst_data.h_els.command_code;
1770         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1771         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1772         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1773         els_iocb->control_flags = 0;
1774         els_iocb->rx_byte_count =
1775             cpu_to_le32(bsg_job->reply_payload.payload_len);
1776         els_iocb->tx_byte_count =
1777             cpu_to_le32(bsg_job->request_payload.payload_len);
1778
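        /*
         * Only the first segment of each payload is embedded below even
         * though the DSD counts above come from sg_cnt; this assumes the
         * BSG layer maps each ELS payload as a single segment.
         */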
1779         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1780             (bsg_job->request_payload.sg_list)));
1781         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1782             (bsg_job->request_payload.sg_list)));
1783         els_iocb->tx_len = cpu_to_le32(sg_dma_len
1784             (bsg_job->request_payload.sg_list));
1785
1786         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1787             (bsg_job->reply_payload.sg_list)));
1788         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1789             (bsg_job->reply_payload.sg_list)));
1790         els_iocb->rx_len = cpu_to_le32(sg_dma_len
1791             (bsg_job->reply_payload.sg_list));
1792 }
1793
1794 static void
1795 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
1796 {
1797         uint16_t        avail_dsds;
1798         uint32_t        *cur_dsd;
1799         struct scatterlist *sg;
1800         int index;
1801         uint16_t tot_dsds;
1802         scsi_qla_host_t *vha = sp->fcport->vha;
1803         struct qla_hw_data *ha = vha->hw;
1804         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1805         int loop_iteration = 0;
1806         int cont_iocb_prsnt = 0;
1807         int entry_count = 1;
1808
1809         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
1810         ct_iocb->entry_type = CT_IOCB_TYPE;
1811         ct_iocb->entry_status = 0;
1812         ct_iocb->handle1 = sp->handle;
1813         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
1814         ct_iocb->status = __constant_cpu_to_le16(0);
1815         ct_iocb->control_flags = __constant_cpu_to_le16(0);
1816         ct_iocb->timeout = 0;
1817         ct_iocb->cmd_dsd_count =
1818             cpu_to_le16(bsg_job->request_payload.sg_cnt);
1819         ct_iocb->total_dsd_count =
1820             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
1821         ct_iocb->req_bytecount =
1822             cpu_to_le32(bsg_job->request_payload.payload_len);
1823         ct_iocb->rsp_bytecount =
1824             cpu_to_le32(bsg_job->reply_payload.payload_len);
1825
1826         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
1827             (bsg_job->request_payload.sg_list)));
1828         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
1829             (bsg_job->request_payload.sg_list)));
1830         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
1831
1832         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
1833             (bsg_job->reply_payload.sg_list)));
1834         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
1835             (bsg_job->reply_payload.sg_list)));
1836         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
1837
1838         avail_dsds = 1;
1839         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
1840         index = 0;
1841         tot_dsds = bsg_job->reply_payload.sg_cnt;
1842
1843         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1844                 dma_addr_t       sle_dma;
1845                 cont_a64_entry_t *cont_pkt;
1846
1847                 /* Allocate additional continuation packets? */
1848                 if (avail_dsds == 0) {
1849                         /*
1850                          * Five DSDs are available in the Cont.
1851                          * Type 1 IOCB.
1852                          */
1853                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1854                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1855                         avail_dsds = 5;
1856                         cont_iocb_prsnt = 1;
1857                         entry_count++;
1858                 }
1859
1860                 sle_dma = sg_dma_address(sg);
1861                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1862                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1863                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
1864                 loop_iteration++;
1865                 avail_dsds--;
1866         }
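        /*
         * One reply DSD rode in the IOCB itself (avail_dsds started at
         * 1) and each Continuation Type 1 IOCB added five more; a reply
         * scatterlist of 7 entries, for example, needs two continuation
         * packets and ends up with entry_count = 3.
         */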
1867         ct_iocb->entry_count = entry_count;
1868 }
1869
1870 static void
1871 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1872 {
1873         uint16_t        avail_dsds;
1874         uint32_t        *cur_dsd;
1875         struct scatterlist *sg;
1876         int index;
1877         uint16_t tot_dsds;
1878         scsi_qla_host_t *vha = sp->fcport->vha;
1879         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1880         int loop_iteration = 0;
1881         int cont_iocb_prsnt = 0;
1882         int entry_count = 1;
1883
1884         ct_iocb->entry_type = CT_IOCB_TYPE;
1885         ct_iocb->entry_status = 0;
1886         ct_iocb->sys_define = 0;
1887         ct_iocb->handle = sp->handle;
1888
1889         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1890         ct_iocb->vp_index = sp->fcport->vp_idx;
1891         ct_iocb->comp_status = __constant_cpu_to_le16(0);
1892
1893         ct_iocb->cmd_dsd_count =
1894             cpu_to_le16(bsg_job->request_payload.sg_cnt);
1895         ct_iocb->timeout = 0;
1896         ct_iocb->rsp_dsd_count =
1897             cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1898         ct_iocb->rsp_byte_count =
1899             cpu_to_le32(bsg_job->reply_payload.payload_len);
1900         ct_iocb->cmd_byte_count =
1901             cpu_to_le32(bsg_job->request_payload.payload_len);
1902         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1903             (bsg_job->request_payload.sg_list)));
1904         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1905            (bsg_job->request_payload.sg_list)));
1906         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1907             (bsg_job->request_payload.sg_list));
1908
1909         avail_dsds = 1;
1910         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1911         index = 0;
1912         tot_dsds = bsg_job->reply_payload.sg_cnt;
1913
1914         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1915                 dma_addr_t       sle_dma;
1916                 cont_a64_entry_t *cont_pkt;
1917
1918                 /* Allocate additional continuation packets? */
1919                 if (avail_dsds == 0) {
1920                         /*
1921                          * Five DSDs are available in the Cont.
1922                          * Type 1 IOCB.
1923                          */
1924                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1925                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1926                         avail_dsds = 5;
1927                         cont_iocb_prsnt = 1;
1928                         entry_count++;
1929                 }
1930
1931                 sle_dma = sg_dma_address(sg);
1932                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1933                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1934                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
1935                 loop_iteration++;
1936                 avail_dsds--;
1937         }
1938         ct_iocb->entry_count = entry_count;
1939 }
1940
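/**
 * qla2x00_start_sp() - Build and issue the IOCB for a control SRB.
 * @sp: SRB to dispatch
 *
 * Takes the hardware lock, reserves a request entry, fills it in
 * according to the SRB context type and rings the request-queue
 * doorbell.
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED when no IOCB space is
 * available.
 */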
1941 int
1942 qla2x00_start_sp(srb_t *sp)
1943 {
1944         int rval;
1945         struct qla_hw_data *ha = sp->fcport->vha->hw;
1946         void *pkt;
1947         struct srb_ctx *ctx = sp->ctx;
1948         unsigned long flags;
1949
1950         rval = QLA_FUNCTION_FAILED;
1951         spin_lock_irqsave(&ha->hardware_lock, flags);
1952         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
1953         if (!pkt)
1954                 goto done;
1955
1956         rval = QLA_SUCCESS;
1957         switch (ctx->type) {
1958         case SRB_LOGIN_CMD:
1959                 IS_FWI2_CAPABLE(ha) ?
1960                     qla24xx_login_iocb(sp, pkt) :
1961                     qla2x00_login_iocb(sp, pkt);
1962                 break;
1963         case SRB_LOGOUT_CMD:
1964                 IS_FWI2_CAPABLE(ha) ?
1965                     qla24xx_logout_iocb(sp, pkt) :
1966                     qla2x00_logout_iocb(sp, pkt);
1967                 break;
1968         case SRB_ELS_CMD_RPT:
1969         case SRB_ELS_CMD_HST:
1970                 qla24xx_els_iocb(sp, pkt);
1971                 break;
1972         case SRB_CT_CMD:
1973                 IS_FWI2_CAPABLE(ha) ?
1974                     qla24xx_ct_iocb(sp, pkt) :
1975                     qla2x00_ct_iocb(sp, pkt);
1976                 break;
1977         case SRB_ADISC_CMD:
1978                 IS_FWI2_CAPABLE(ha) ?
1979                     qla24xx_adisc_iocb(sp, pkt) :
1980                     qla2x00_adisc_iocb(sp, pkt);
1981                 break;
1982         case SRB_TM_CMD:
1983                 qla24xx_tm_iocb(sp, pkt);
1984                 break;
1985         default:
1986                 break;
1987         }
1988
1989         wmb();
1990         qla2x00_start_iocbs(sp);
1991 done:
1992         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1993         return rval;
1994 }