/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block (SRB) holding the command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
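
/*
 * Worked example: for dsds = 10, the first three DSDs fit in the
 * Command Type 2 IOCB and the remaining seven fill exactly one
 * Continuation Type 0 IOCB, so iocbs = 1 + 7/7 = 2.
 */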

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
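
/*
 * Worked example: for dsds = 13, two DSDs fit in the Command Type 3
 * IOCB and the remaining eleven need three Continuation Type 1 IOCBs
 * (5 + 5 + 1), so iocbs = 1 + 11/5 + 1 = 4.
 */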

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
		return 0;
	}

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
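	/*
	 * The +2 below keeps a little slack in the ring; a completely
	 * full ring (in == out) would be indistinguishable from an
	 * empty one.
	 */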
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		if (ql2xdbwr) {
			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		} else {
			WRT_REG_DWORD(
				(unsigned long __iomem *)ha->nxdb_wr_ptr,
				dbval);
			wmb();
			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
				WRT_REG_DWORD((unsigned long __iomem *)
					ha->nxdb_wr_ptr, dbval);
				wmb();
			}
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
	    __func__, iocbs));
	return iocbs;
}
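
/*
 * Worked example: for dsds = 6, one DSD fits in the Command Type 7
 * IOCB and the remaining five fill exactly one Continuation Type 1
 * IOCB, so iocbs = 1 + 5/5 = 2.
 */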

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct sd_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(cmd);

	switch (scsi_get_prot_type(cmd)) {
	/* For TYPE 0 protection: no checking */
	case SCSI_PROT_DIF_TYPE0:
		pkt->ref_tag_mask[0] = 0x00;
		pkt->ref_tag_mask[1] = 0x00;
		pkt->ref_tag_mask[2] = 0x00;
		pkt->ref_tag_mask[3] = 0x00;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		if (!ql2xenablehba_err_chk)
			break;

		if (scsi_prot_sg_count(cmd)) {
			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
			    scsi_prot_sglist(cmd)[0].offset;
			pkt->app_tag = swab32(spt->app_tag);
			pkt->app_tag_mask[0] = 0xff;
			pkt->app_tag_mask[1] = 0xff;
		}

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For TYPE 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
									0x00;
		break;

	/*
	 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		if (!ql2xenablehba_err_chk)
			break;

		if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
		    op == SCSI_PROT_WRITE_PASS)) {
			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
			    scsi_prot_sglist(cmd)[0].offset;
			DEBUG18(printk(KERN_DEBUG
			    "%s(): LBA from user %p, lba = 0x%x\n",
			    __func__, spt, (int)spt->ref_tag));
			pkt->ref_tag = swab32(spt->ref_tag);
			pkt->app_tag_mask[0] = 0x0;
			pkt->app_tag_mask[1] = 0x0;
		} else {
			pkt->ref_tag = cpu_to_le32((uint32_t)
			    (0xffffffff & scsi_get_lba(cmd)));
			pkt->app_tag = __constant_cpu_to_le16(0);
			pkt->app_tag_mask[0] = 0x0;
			pkt->app_tag_mask[1] = 0x0;
		}
		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	DEBUG18(printk(KERN_DEBUG
	    "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
	    " app tag = 0x%x, prot SG count %d, cmd lba 0x%x,"
	    " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
	    (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	uint8_t		*cp;

	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
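			/*
			 * Each DSD entry is 12 bytes: 32-bit address
			 * low/high plus a 32-bit length.  The +1 below
			 * reserves one more 12-byte slot for the pointer
			 * that chains to the next list (or for the null
			 * terminator on the last list).
			 */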
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
		    " len =%d\n", __func__, cur_dsd, i, LSD(sle_dma),
		    MSD(sle_dma), sg_dma_len(sg)));
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			DEBUG18(printk("%s(): User Data buffer= %p:\n",
			    __func__, cp));
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;
	uint8_t		*cp;

	cmd = sp->cmd;
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			DEBUG18(printk(KERN_DEBUG
			    "%s(): %p, sg entry %d - addr =0x%x "
			    "0x%x, len =%d\n", __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
			    __func__, cp));
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *                                                      Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	struct scatterlist	*cur_seg;
	int			sgc;
	uint32_t		total_bytes;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;

	cmd = sp->cmd;

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
		    __func__, data_bytes));
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	DEBUG18(printk(KERN_DEBUG
	    "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
	    vha->host_no, sp, scsi_get_prot_op(sp->cmd)));

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

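	/*
	 * With protection segments present, DIF "bundling" keeps the data
	 * and protection DSDs in separate lists so the firmware can fetch
	 * them independently; without any protection segments there is
	 * nothing to bundle.
	 */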
	tot_prot_dsds = scsi_prot_sg_count(cmd);
	if (!tot_prot_dsds)
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
		    __func__));
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}
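
	/*
	 * FCP_CMND layout behind the length math above: a 12-byte fixed
	 * header (8-byte LUN plus task/flag bytes), then the CDB (16 bytes
	 * plus any additional CDB bytes), then the 4-byte FCP_DL that is
	 * filled in below through fcp_dl.
	 */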

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
	host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun));
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_attribute = 0;
	fcp_cmnd->task_management = 0;

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
	    " entries %d, data bytes %d, Protection entries %d\n",
	    __func__, vha->host_no, tot_dsds, (tot_dsds - tot_prot_dsds),
	    data_bytes, tot_prot_dsds));

	/* Compute DIF length and adjust data length to include protection */
	total_bytes = data_bytes;
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
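		/* Each logical block carries an 8-byte DIF tuple
		 * (2-byte guard, 2-byte app tag, 4-byte ref tag) on
		 * the wire, hence the extra 8 bytes per block.
		 */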
		dif_bytes = (data_bytes / blk_size) * 8;
		total_bytes += dif_bytes;
	}

	if (!ql2xenablehba_err_chk)
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
	    " = 0x%x (%d), data block size = 0x%x (%d)\n", __func__,
	    vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
	    crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
		    __func__, data_bytes));
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	/* Walk data segments */
	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walk DIF segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	DEBUG18(qla_printk(KERN_INFO, ha,
	    "CMD sent FAILED crc_q error:sp = %p\n", sp));
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = sp->cmd;
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */
	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}

	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	DEBUG18(qla_printk(KERN_INFO, ha,
	    "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
	return QLA_FUNCTION_FAILED;
}
1494
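/**
 * qla25xx_set_que() - Select the response queue for an I/O command.
 * @sp: SCSI Request Block
 * @rsp: returned pointer to the chosen response queue
 *
 * With CPU affinity enabled, completions are steered to the response
 * queue mapped to the issuing CPU; otherwise the default response
 * queue (index 0) is used.
 */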
1496 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1497 {
1498         struct scsi_cmnd *cmd = sp->cmd;
1499         struct qla_hw_data *ha = sp->fcport->vha->hw;
1500         int affinity = cmd->request->cpu;
1501
1502         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1503                 affinity < ha->max_rsp_queues - 1)
1504                 *rsp = ha->rsp_q_map[affinity + 1];
1505         else
1506                 *rsp = ha->rsp_q_map[0];
1507 }
1508
1509 /* Generic Control-SRB manipulation functions. */
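/**
 * qla2x00_alloc_iocbs() - Allocate request-queue space for an IOCB.
 * @vha: HA context
 * @sp: SRB to bind to the new entry, or NULL when no outstanding-command
 *      slot is required
 *
 * Returns a zeroed request packet on success; NULL when the outstanding
 * command array or the request ring is full.
 */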
1510 void *
1511 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1512 {
1513         struct qla_hw_data *ha = vha->hw;
1514         struct req_que *req = ha->req_q_map[0];
1515         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1516         uint32_t index, handle;
1517         request_t *pkt;
1518         uint16_t cnt, req_cnt;
1519
1520         pkt = NULL;
1521         req_cnt = 1;
1522         handle = 0;
1523
1524         if (!sp)
1525                 goto skip_cmd_array;
1526
1527         /* Check for room in outstanding command list. */
1528         handle = req->current_outstanding_cmd;
1529         for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1530                 handle++;
1531                 if (handle == MAX_OUTSTANDING_COMMANDS)
1532                         handle = 1;
1533                 if (!req->outstanding_cmds[handle])
1534                         break;
1535         }
1536         if (index == MAX_OUTSTANDING_COMMANDS)
1537                 goto queuing_error;
1538
1539         /* Prep command array. */
1540         req->current_outstanding_cmd = handle;
1541         req->outstanding_cmds[handle] = sp;
1542         sp->handle = handle;
1543
1544 skip_cmd_array:
1545         /* Check for room on request queue. */
1546         if (req->cnt < req_cnt) {
1547                 if (ha->mqenable)
1548                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1549                 else if (IS_QLA82XX(ha))
1550                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1551                 else if (IS_FWI2_CAPABLE(ha))
1552                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1553                 else
1554                         cnt = qla2x00_debounce_register(
1555                             ISP_REQ_Q_OUT(ha, &reg->isp));
1556
1557                 if (req->ring_index < cnt)
1558                         req->cnt = cnt - req->ring_index;
1559                 else
1560                         req->cnt = req->length -
1561                             (req->ring_index - cnt);
1562         }
1563         if (req->cnt < req_cnt)
1564                 goto queuing_error;
1565
1566         /* Prep packet */
1567         req->cnt -= req_cnt;
1568         pkt = req->ring_ptr;
1569         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1570         pkt->entry_count = req_cnt;
1571         pkt->handle = handle;
1572
1573 queuing_error:
1574         return pkt;
1575 }
1576
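/**
 * qla2x00_start_iocbs() - Hand a prepared IOCB to the ISP.
 * @sp: SCSI Request Block
 *
 * Advances the request-ring index (wrapping as needed) and notifies the
 * chip of the new in-pointer through the register interface appropriate
 * for the HBA type.
 */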
1577 static void
1578 qla2x00_start_iocbs(srb_t *sp)
1579 {
1580         struct qla_hw_data *ha = sp->fcport->vha->hw;
1581         struct req_que *req = ha->req_q_map[0];
1582         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1583         struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1584
1585         if (IS_QLA82XX(ha)) {
1586                 qla82xx_start_iocbs(sp);
1587         } else {
1588                 /* Adjust ring index. */
1589                 req->ring_index++;
1590                 if (req->ring_index == req->length) {
1591                         req->ring_index = 0;
1592                         req->ring_ptr = req->ring;
1593                 } else
1594                         req->ring_ptr++;
1595
1596                 /* Set chip new ring index. */
1597                 if (ha->mqenable) {
1598                         WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1599                         RD_REG_DWORD(&ioreg->hccr);
1602                 } else if (IS_FWI2_CAPABLE(ha)) {
1603                         WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1604                         RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1605                 } else {
1606                         WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1607                                 req->ring_index);
1608                         RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1609                 }
1610         }
1611 }
1612
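/**
 * qla24xx_login_iocb() - Build a Login IOCB for FWI2-capable HBAs.
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */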
1613 static void
1614 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1615 {
1616         struct srb_ctx *ctx = sp->ctx;
1617         struct srb_iocb *lio = ctx->u.iocb_cmd;
1618
1619         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1620         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1621         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1622                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1623         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1624                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1625         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1626         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1627         logio->port_id[1] = sp->fcport->d_id.b.area;
1628         logio->port_id[2] = sp->fcport->d_id.b.domain;
1629         logio->vp_index = sp->fcport->vp_idx;
1630 }
1631
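/**
 * qla2x00_login_iocb() - Build a fabric-login mailbox IOCB for
 * pre-FWI2 HBAs.
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB to populate
 */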
1632 static void
1633 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1634 {
1635         struct qla_hw_data *ha = sp->fcport->vha->hw;
1636         struct srb_ctx *ctx = sp->ctx;
1637         struct srb_iocb *lio = ctx->u.iocb_cmd;
1638         uint16_t opts;
1639
1640         mbx->entry_type = MBX_IOCB_TYPE;
1641         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1642         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1643         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1644         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1645         if (HAS_EXTENDED_IDS(ha)) {
1646                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1647                 mbx->mb10 = cpu_to_le16(opts);
1648         } else {
1649                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1650         }
1651         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1652         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1653             sp->fcport->d_id.b.al_pa);
1654         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1655 }
1656
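/**
 * qla24xx_logout_iocb() - Build a Logout IOCB for FWI2-capable HBAs.
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */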
1657 static void
1658 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1659 {
1660         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1661         logio->control_flags =
1662             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1663         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1664         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1665         logio->port_id[1] = sp->fcport->d_id.b.area;
1666         logio->port_id[2] = sp->fcport->d_id.b.domain;
1667         logio->vp_index = sp->fcport->vp_idx;
1668 }
1669
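/**
 * qla2x00_logout_iocb() - Build a fabric-logout mailbox IOCB for
 * pre-FWI2 HBAs.
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB to populate
 */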
1670 static void
1671 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1672 {
1673         struct qla_hw_data *ha = sp->fcport->vha->hw;
1674
1675         mbx->entry_type = MBX_IOCB_TYPE;
1676         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1677         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1678         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1679             cpu_to_le16(sp->fcport->loop_id):
1680             cpu_to_le16(sp->fcport->loop_id << 8);
1681         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1682         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1683             sp->fcport->d_id.b.al_pa);
1684         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1685         /* Implicit: mbx->mbx10 = 0. */
1686 }
1687
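/**
 * qla24xx_adisc_iocb() - Build an ADISC IOCB for FWI2-capable HBAs.
 * @sp: SCSI Request Block
 * @logio: Login/Logout Port IOCB to populate
 */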
1688 static void
1689 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1690 {
1691         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1692         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1693         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1694         logio->vp_index = sp->fcport->vp_idx;
1695 }
1696
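/**
 * qla2x00_adisc_iocb() - Build the pre-FWI2 ADISC equivalent: a
 * get-port-database mailbox IOCB whose response lands in the
 * async_pd_dma buffer.
 * @sp: SCSI Request Block
 * @mbx: mailbox IOCB to populate
 */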
1697 static void
1698 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1699 {
1700         struct qla_hw_data *ha = sp->fcport->vha->hw;
1701
1702         mbx->entry_type = MBX_IOCB_TYPE;
1703         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1704         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1705         if (HAS_EXTENDED_IDS(ha)) {
1706                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1707                 mbx->mb10 = cpu_to_le16(BIT_0);
1708         } else {
1709                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1710         }
1711         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1712         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1713         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1714         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1715         mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1716 }
1717
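/**
 * qla24xx_tm_iocb() - Build a Task Management IOCB.
 * @sp: SCSI Request Block
 * @tsk: Task Management IOCB to populate
 *
 * The LUN field is filled in (and byte-swapped to FCP format) only for
 * a LUN reset.
 */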
1718 static void
1719 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1720 {
1721         uint32_t flags;
1722         unsigned int lun;
1723         struct fc_port *fcport = sp->fcport;
1724         scsi_qla_host_t *vha = fcport->vha;
1725         struct qla_hw_data *ha = vha->hw;
1726         struct srb_ctx *ctx = sp->ctx;
1727         struct srb_iocb *iocb = ctx->u.iocb_cmd;
1728         struct req_que *req = vha->req;
1729
1730         flags = iocb->u.tmf.flags;
1731         lun = iocb->u.tmf.lun;
1732
1733         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1734         tsk->entry_count = 1;
1735         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1736         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1737         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1738         tsk->control_flags = cpu_to_le32(flags);
1739         tsk->port_id[0] = fcport->d_id.b.al_pa;
1740         tsk->port_id[1] = fcport->d_id.b.area;
1741         tsk->port_id[2] = fcport->d_id.b.domain;
1742         tsk->vp_index = fcport->vp_idx;
1743
1744         if (flags == TCF_LUN_RESET) {
1745                 int_to_scsilun(lun, &tsk->lun);
1746                 host_to_fcp_swap((uint8_t *)&tsk->lun,
1747                         sizeof(tsk->lun));
1748         }
1749 }
1750
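/**
 * qla24xx_els_iocb() - Build an ELS Pass-Through IOCB from a bsg job.
 * @sp: SCSI Request Block
 * @els_iocb: ELS IOCB to populate
 *
 * Transmit and receive buffers are taken from the bsg request and reply
 * payload scatter/gather lists; the ELS opcode comes from the rport or
 * host variant of the bsg request, depending on the SRB type.
 */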
1751 static void
1752 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1753 {
1754         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1755
1756         els_iocb->entry_type = ELS_IOCB_TYPE;
1757         els_iocb->entry_count = 1;
1758         els_iocb->sys_define = 0;
1759         els_iocb->entry_status = 0;
1760         els_iocb->handle = sp->handle;
1761         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1762         els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
1763         els_iocb->vp_index = sp->fcport->vp_idx;
1764         els_iocb->sof_type = EST_SOFI3;
1765         els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1766
1767         els_iocb->opcode =
1768             (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
1769             bsg_job->request->rqst_data.r_els.els_code :
1770             bsg_job->request->rqst_data.h_els.command_code;
1771         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1772         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1773         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1774         els_iocb->control_flags = 0;
1775         els_iocb->rx_byte_count =
1776             cpu_to_le32(bsg_job->reply_payload.payload_len);
1777         els_iocb->tx_byte_count =
1778             cpu_to_le32(bsg_job->request_payload.payload_len);
1779
1780         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1781             (bsg_job->request_payload.sg_list)));
1782         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
1783             (bsg_job->request_payload.sg_list)));
1784         els_iocb->tx_len = cpu_to_le32(sg_dma_len
1785             (bsg_job->request_payload.sg_list));
1786
1787         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
1788             (bsg_job->reply_payload.sg_list)));
1789         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
1790             (bsg_job->reply_payload.sg_list)));
1791         els_iocb->rx_len = cpu_to_le32(sg_dma_len
1792             (bsg_job->reply_payload.sg_list));
1793 }
1794
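/**
 * qla2x00_ct_iocb() - Build a CT Pass-Through (MS) IOCB for pre-FWI2 HBAs.
 * @sp: SCSI Request Block
 * @ct_iocb: MS IOCB to populate
 *
 * Reply-payload segments beyond the first spill into Continuation Type 1
 * IOCBs, five DSDs per continuation entry.
 */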
1795 static void
1796 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
1797 {
1798         uint16_t        avail_dsds;
1799         uint32_t        *cur_dsd;
1800         struct scatterlist *sg;
1801         int index;
1802         uint16_t tot_dsds;
1803         scsi_qla_host_t *vha = sp->fcport->vha;
1804         struct qla_hw_data *ha = vha->hw;
1805         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1806         int loop_iteration = 0;
1807         int cont_iocb_prsnt = 0;
1808         int entry_count = 1;
1809
1810         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
1811         ct_iocb->entry_type = CT_IOCB_TYPE;
1812         ct_iocb->entry_status = 0;
1813         ct_iocb->handle1 = sp->handle;
1814         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
1815         ct_iocb->status = __constant_cpu_to_le16(0);
1816         ct_iocb->control_flags = __constant_cpu_to_le16(0);
1817         ct_iocb->timeout = 0;
1818         ct_iocb->cmd_dsd_count =
1819             cpu_to_le16(bsg_job->request_payload.sg_cnt);
1820         ct_iocb->total_dsd_count =
1821             cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
1822         ct_iocb->req_bytecount =
1823             cpu_to_le32(bsg_job->request_payload.payload_len);
1824         ct_iocb->rsp_bytecount =
1825             cpu_to_le32(bsg_job->reply_payload.payload_len);
1826
1827         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
1828             (bsg_job->request_payload.sg_list)));
1829         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
1830             (bsg_job->request_payload.sg_list)));
1831         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
1832
1833         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
1834             (bsg_job->reply_payload.sg_list)));
1835         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
1836             (bsg_job->reply_payload.sg_list)));
1837         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
1838
1839         avail_dsds = 1;
1840         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
1841         index = 0;
1842         tot_dsds = bsg_job->reply_payload.sg_cnt;
1843
1844         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1845                 dma_addr_t       sle_dma;
1846                 cont_a64_entry_t *cont_pkt;
1847
1848                 /* Allocate additional continuation packets? */
1849                 if (avail_dsds == 0) {
1850                         /*
1851                          * Five DSDs are available in the Cont.
1852                          * Type 1 IOCB.
1853                          */
1854                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1855                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1856                         avail_dsds = 5;
1857                         cont_iocb_prsnt = 1;
1858                         entry_count++;
1859                 }
1860
1861                 sle_dma = sg_dma_address(sg);
1862                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1863                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1864                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
1865                 loop_iteration++;
1866                 avail_dsds--;
1867         }
1868         ct_iocb->entry_count = entry_count;
1869 }
1870
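/**
 * qla24xx_ct_iocb() - Build a CT Pass-Through IOCB for FWI2-capable HBAs.
 * @sp: SCSI Request Block
 * @ct_iocb: CT IOCB to populate
 *
 * Reply-payload segments beyond the first spill into Continuation Type 1
 * IOCBs, five DSDs per continuation entry.
 */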
1871 static void
1872 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
1873 {
1874         uint16_t        avail_dsds;
1875         uint32_t        *cur_dsd;
1876         struct scatterlist *sg;
1877         int index;
1878         uint16_t tot_dsds;
1879         scsi_qla_host_t *vha = sp->fcport->vha;
1880         struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
1881         int loop_iteration = 0;
1882         int cont_iocb_prsnt = 0;
1883         int entry_count = 1;
1884
1885         ct_iocb->entry_type = CT_IOCB_TYPE;
1886         ct_iocb->entry_status = 0;
1887         ct_iocb->sys_define = 0;
1888         ct_iocb->handle = sp->handle;
1889
1890         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1891         ct_iocb->vp_index = sp->fcport->vp_idx;
1892         ct_iocb->comp_status = __constant_cpu_to_le16(0);
1893
1894         ct_iocb->cmd_dsd_count =
1895             cpu_to_le16(bsg_job->request_payload.sg_cnt);
1896         ct_iocb->timeout = 0;
1897         ct_iocb->rsp_dsd_count =
1898             cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1899         ct_iocb->rsp_byte_count =
1900             cpu_to_le32(bsg_job->reply_payload.payload_len);
1901         ct_iocb->cmd_byte_count =
1902             cpu_to_le32(bsg_job->request_payload.payload_len);
1903         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
1904             (bsg_job->request_payload.sg_list)));
1905         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
1906            (bsg_job->request_payload.sg_list)));
1907         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
1908             (bsg_job->request_payload.sg_list));
1909
1910         avail_dsds = 1;
1911         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
1912         index = 0;
1913         tot_dsds = bsg_job->reply_payload.sg_cnt;
1914
1915         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
1916                 dma_addr_t       sle_dma;
1917                 cont_a64_entry_t *cont_pkt;
1918
1919                 /* Allocate additional continuation packets? */
1920                 if (avail_dsds == 0) {
1921                         /*
1922                          * Five DSDs are available in the Cont.
1923                          * Type 1 IOCB.
1924                          */
1925                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
1926                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
1927                         avail_dsds = 5;
1928                         cont_iocb_prsnt = 1;
1929                         entry_count++;
1930                 }
1931
1932                 sle_dma = sg_dma_address(sg);
1933                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
1934                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
1935                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
1936                 loop_iteration++;
1937                 avail_dsds--;
1938         }
1939         ct_iocb->entry_count = entry_count;
1940 }
1941
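/**
 * qla2x00_start_sp() - Dispatch a control SRB to the ISP.
 * @sp: SCSI Request Block
 *
 * Allocates an IOCB under the hardware lock, fills it in according to
 * the SRB context type and rings the request queue.
 *
 * Returns QLA_SUCCESS when queued, QLA_FUNCTION_FAILED when no IOCB
 * space is available.
 */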
1942 int
1943 qla2x00_start_sp(srb_t *sp)
1944 {
1945         int rval;
1946         struct qla_hw_data *ha = sp->fcport->vha->hw;
1947         void *pkt;
1948         struct srb_ctx *ctx = sp->ctx;
1949         unsigned long flags;
1950
1951         rval = QLA_FUNCTION_FAILED;
1952         spin_lock_irqsave(&ha->hardware_lock, flags);
1953         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
1954         if (!pkt)
1955                 goto done;
1956
1957         rval = QLA_SUCCESS;
1958         switch (ctx->type) {
1959         case SRB_LOGIN_CMD:
1960                 IS_FWI2_CAPABLE(ha) ?
1961                     qla24xx_login_iocb(sp, pkt) :
1962                     qla2x00_login_iocb(sp, pkt);
1963                 break;
1964         case SRB_LOGOUT_CMD:
1965                 IS_FWI2_CAPABLE(ha) ?
1966                     qla24xx_logout_iocb(sp, pkt) :
1967                     qla2x00_logout_iocb(sp, pkt);
1968                 break;
1969         case SRB_ELS_CMD_RPT:
1970         case SRB_ELS_CMD_HST:
1971                 qla24xx_els_iocb(sp, pkt);
1972                 break;
1973         case SRB_CT_CMD:
1974                 IS_FWI2_CAPABLE(ha) ?
1975                 qla24xx_ct_iocb(sp, pkt) :
1976                 qla2x00_ct_iocb(sp, pkt);
1977                 break;
1978         case SRB_ADISC_CMD:
1979                 IS_FWI2_CAPABLE(ha) ?
1980                     qla24xx_adisc_iocb(sp, pkt) :
1981                     qla2x00_adisc_iocb(sp, pkt);
1982                 break;
1983         case SRB_TM_CMD:
1984                 qla24xx_tm_iocb(sp, pkt);
1985                 break;
1986         default:
1987                 break;
1988         }
1989
1990         wmb();
1991         qla2x00_start_iocbs(sp);
1992 done:
1993         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1994         return rval;
1995 }