/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
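
/*
 * Worked example (illustration only, not driver code): a Command Type 2
 * IOCB in-lines 3 DSDs and each Continuation Type 0 IOCB carries 7 more,
 * so dsds = 10 costs 1 + (10 - 3) / 7 = 2 IOCBs, while dsds = 11 leaves a
 * remainder and needs a third entry.
 */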

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
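
/*
 * Worked example (illustration only): the 64-bit layout in-lines 2 DSDs
 * and fits 5 per Continuation Type 1 IOCB, so dsds = 12 needs exactly
 * 1 + (12 - 2) / 5 = 3 IOCBs with no remainder.
 */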

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
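
/*
 * Ring-wrap example (illustration only): with req->length == 128 and
 * req->ring_index == 127, the next advance in either helper above wraps
 * ring_index back to 0 and points ring_ptr at req->ring again instead of
 * walking past the end of the ring.
 */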

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t	guard = scsi_host_get_guard(cmd->device->host);

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (guard & SHOST_DIX_GUARD_IP)
			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
		else
			*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
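
/*
 * Illustrative use (a sketch, not a verbatim driver path): for a
 * SCSI_PROT_WRITE_PASS command on a host registered with
 * SHOST_DIX_GUARD_IP, the switch above selects PO_MODE_DIF_TCP_CKSUM and
 * the return value is the number of protection SG entries to map.
 */
#if 0	/* example only */
	uint16_t fw_prot_opts = 0;
	int prot_sg_cnt = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
#endif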

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
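
/*
 * Worked example (illustration only): a 12-element scatter-gather list is
 * laid out by the loop above as 3 DSDs in the Command Type 2 IOCB, 7 in
 * the first Continuation Type 0 IOCB, and the remaining 2 in a second
 * continuation, matching qla2x00_calc_iocbs_32(12) == 3.
 */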

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char		tag[2];

	/* Setup device pointers. */
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number*/
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
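
/*
 * Worked example for the ring-room check above (illustration only): with
 * req->length == 128, req->ring_index == 120 and a hardware out-pointer
 * cnt == 10, free space is req->length - (ring_index - cnt) =
 * 128 - 110 = 18 entries, so any request needing req_cnt + 2 <= 18 slots
 * proceeds and anything larger bails out to queuing_error.
 */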

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
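
/*
 * Illustrative use (a sketch, not a verbatim driver path): resynchronize
 * a single nexus after a LUN reset.  MK_SYNC_ID_LUN scopes the marker to
 * one target/LUN, while MK_SYNC_ALL (used by qla2x00_issue_marker() below)
 * covers the whole port.
 */
#if 0	/* example only */
	if (qla2x00_marker(vha, vha->req, vha->req->rsp, fcport->loop_id,
	    lun, MK_SYNC_ID_LUN) != QLA_SUCCESS)
		vha->marker_needed = 1;	/* retry on the next command */
#endif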

/**
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
	if (ha_locked) {
		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	} else {
		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
		    MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
	}
	vha->marker_needed = 0;

	return QLA_SUCCESS;
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
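
/*
 * Worked example (illustration only): the 24xx command IOCB in-lines a
 * single DSD, so dsds = 7 costs 1 + (7 - 1) / 5 = 2 entries plus one more
 * for the (7 - 1) % 5 remainder, i.e. 3 IOCBs in total.
 */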

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
		__constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t	sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
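
/*
 * Layout sketch for the loop above (illustration only): the Command
 * Type 6 IOCB points at the first DSD list; each list carries up to
 * QLA_DSDS_PER_IOCB data descriptors plus one chaining slot for the next
 * list, which is why dsd_list_len is (avail_dsds + 1) * QLA_DSD_SIZE, and
 * the final list ends with the all-zero null-termination DSD.
 */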

/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
731 * @sp: SRB command to process
732 * @cmd_pkt: Command type 3 IOCB
733 * @tot_dsds: Total number of segments to transfer
736 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
741 scsi_qla_host_t *vha;
742 struct scsi_cmnd *cmd;
743 struct scatterlist *sg;
747 cmd = GET_CMD_SP(sp);
749 /* Update entry type to indicate Command Type 3 IOCB */
750 *((uint32_t *)(&cmd_pkt->entry_type)) =
751 __constant_cpu_to_le32(COMMAND_TYPE_7);
753 /* No data transfer */
754 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
755 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
759 vha = sp->fcport->vha;
762 /* Set transfer direction */
763 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
764 cmd_pkt->task_mgmt_flags =
765 __constant_cpu_to_le16(TMF_WRITE_DATA);
766 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
767 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
768 cmd_pkt->task_mgmt_flags =
769 __constant_cpu_to_le16(TMF_READ_DATA);
770 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
773 /* One DSD is available in the Command Type 3 IOCB */
775 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
777 /* Load data segments */
779 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
781 cont_a64_entry_t *cont_pkt;
783 /* Allocate additional continuation packets? */
784 if (avail_dsds == 0) {
786 * Five DSDs are available in the Continuation
789 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
790 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
794 sle_dma = sg_dma_address(sg);
795 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
796 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
797 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}
}
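
/*
 * Worked example (illustration only): a Type 1 protected WRITE at
 * LBA 0x1234 programs ref_tag = 0x1234 (the low 32 bits of the LBA) and
 * app_tag = 0; with HBA error checking enabled the ref_tag_mask becomes
 * ff ff ff ff, telling the firmware to check every byte of the reference
 * tag, while the zeroed app_tag_mask leaves the application tag unchecked.
 */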

struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;

	/* for debugging */
	uint32_t		num_sg;
	srb_t			*sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
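
/*
 * Worked example (illustration only): with blk_sz = 512 and two 256-byte
 * SG elements, the first call returns dma_addr/dma_len for 256 bytes and
 * sets *partial = 1; the second call returns the remaining 256 bytes with
 * *partial = 0, completing one protection-interval block, and the caller
 * emits one DSD per returned chunk.
 */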

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	uint32_t	prot_int;	/* protection interval */
	uint32_t	partial;
	struct qla2_sgx sgx;
	dma_addr_t	sle_dma;
	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	cmd = GET_CMD_SP(sp);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);

		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	struct scatterlist	*cur_seg;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;
	char			tag[2];

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
	/* HBA error checking enabled */
	else if (IS_PI_UNINIT_CAPABLE(ha)) {
		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
			SCSI_PROT_DIF_TYPE2))
			fw_prot_opts |= BIT_10;
		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
		    SCSI_PROT_DIF_TYPE3)
			fw_prot_opts |= BIT_11;
	}

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
							tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
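
/*
 * Worked example (illustration only): a 4096-byte write on 512-byte
 * sectors carries 4096 / 512 = 8 protection tuples of 8 bytes each, so
 * dif_bytes = 64.  For WRITE_PASS the wire count is total_bytes =
 * 4096 + 64 = 4160; for WRITE_STRIP the wire count stays 4096 and only
 * the host-side DMA length (data_bytes) grows by the 64 DIF bytes.
 */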

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		nseg;
	unsigned long	flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char		tag[2];

	/* Setup device pointers. */

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

/**
 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int			nseg;
	unsigned long		flags;
	uint32_t		*clr_ptr;
	uint32_t		index;
	uint32_t		handle;
	uint16_t		cnt;
	uint16_t		req_cnt = 0;
	uint16_t		tot_dsds;
	uint16_t		tot_prot_dsds;
	uint16_t		fw_prot_opts = 0;
	struct req_que		*req = NULL;
	struct rsp_que		*rsp = NULL;
	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
	struct scsi_qla_host	*vha = sp->fcport->vha;
	struct qla_hw_data	*ha = vha->hw;
	struct cmd_type_crc_2	*cmd_pkt;
	uint32_t		status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t	partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number*/
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
	    affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}

/* Generic Control-SRB manipulation functions. */
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable || IS_QLA83XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_QLA82XX(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		if (req->cnt < req_cnt)
			goto queuing_error;
	}

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;

queuing_error:
	return pkt;
}
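
/*
 * Illustrative use (a sketch, not a verbatim driver path): control SRBs
 * carve their IOCB through qla2x00_alloc_iocbs() under hardware_lock and
 * then ring the request-queue doorbell via qla2x00_start_iocbs().
 */
#if 0	/* example only */
	struct logio_entry_24xx *logio;

	logio = qla2x00_alloc_iocbs(vha, sp);
	if (logio) {
		qla24xx_login_iocb(sp, logio);
		wmb();
		qla2x00_start_iocbs(vha, vha->req);
	}
#endif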

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vha->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}

static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	unsigned int lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vha->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
			sizeof(tsk->lun));
	}
}

static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vha->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_present = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_present = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_present = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Cont.
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_present = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}
2209 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2210 * @sp: command to send to the ISP
2212 * Returns non-zero if a failure occurred, else zero.
2215 qla82xx_start_scsi(srb_t *sp)
2218 unsigned long flags;
2219 struct scsi_cmnd *cmd;
2226 struct device_reg_82xx __iomem *reg;
2229 uint8_t additional_cdb_len;
2230 struct ct6_dsd *ctx;
2231 struct scsi_qla_host *vha = sp->fcport->vha;
2232 struct qla_hw_data *ha = vha->hw;
2233 struct req_que *req = NULL;
2234 struct rsp_que *rsp = NULL;
2237 /* Setup device pointers. */
2239 reg = &ha->iobase->isp82;
2240 cmd = GET_CMD_SP(sp);
2242 rsp = ha->rsp_q_map[0];
2244 /* So we know we haven't pci_map'ed anything yet */
2247 dbval = 0x04 | (ha->portnum << 5);
2249 /* Send marker if required */
2250 if (vha->marker_needed != 0) {
2251 if (qla2x00_marker(vha, req,
2252 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2253 ql_log(ql_log_warn, vha, 0x300c,
2254 "qla2x00_marker failed for cmd=%p.\n", cmd);
2255 return QLA_FUNCTION_FAILED;
2257 vha->marker_needed = 0;
2260 /* Acquire ring specific lock */
2261 spin_lock_irqsave(&ha->hardware_lock, flags);
2263 /* Check for room in outstanding command list. */
2264 handle = req->current_outstanding_cmd;
2265 for (index = 1; index < req->num_outstanding_cmds; index++) {
2267 if (handle == req->num_outstanding_cmds)
2269 if (!req->outstanding_cmds[handle])
2272 if (index == req->num_outstanding_cmds)
2275 /* Map the sg table so we have an accurate count of sg entries needed */
2276 if (scsi_sg_count(cmd)) {
2277 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2278 scsi_sg_count(cmd), cmd->sc_data_direction);
2279 if (unlikely(!nseg))
	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Number of DSD lists %d exceeds limit %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}
sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
			if (req->cnt < (req_cnt + 2))
				goto queuing_error;
		}
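		/*
		 * req->cnt caches the number of free request-queue entries;
		 * the block above re-reads the firmware's consumer index and
		 * recomputes the free space (allowing for ring wrap) before
		 * giving up.
		 */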
		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;
		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/*
				 * SCSI commands bigger than 16 bytes must be
				 * a multiple of 4.
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}
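		/*
		 * The FCP_CMND IU length computed above is the 8-byte LUN
		 * plus 4 bytes of control fields (12), the CDB (16 bytes
		 * plus any additional CDB bytes), and the trailing 4-byte
		 * FCP_DL that is stored further below.
		 */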
		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/*
		 * Zero out remaining portion of packet; this also leaves the
		 * tagged queuing modifier at its default, TSK_SIMPLE (0).
		 */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
		/* Build IOCB segments */
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

		/* Build the FCP_CMND IU. */
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;
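		/*
		 * The additional_cdb_len byte doubles as the FCP control
		 * byte: bit 0 flags write data and bit 1 flags read data,
		 * matching the direction of the SCSI command above.
		 */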
		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;
		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
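		/*
		 * FCP_DL is a big-endian count of the total data to move and
		 * lives immediately after the (possibly extended) CDB, hence
		 * the htonl() rather than a cpu_to_le32().
		 */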
		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/*
		 * Specify the response queue number where completion should
		 * happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;

		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

		/*
		 * Zero out remaining portion of packet; this also leaves the
		 * tagged queuing modifier at its default, TSK_SIMPLE (0).
		 */
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

		/* Set NPORT-ID and LUN number. */
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));
		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

		/* Populate the FCP_PRIO. */
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
		/* Load SCSI command packet. */
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		/* Build IOCB segments */
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;
		/*
		 * Specify the response queue number where completion should
		 * happen.
		 */
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}
	/* Bookkeeping for the queued command. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;
	/* Set chip new ring index: write, read back and verify. */
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}
	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}
	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		qla24xx_tm_iocb(sp, pkt);
		break;
	default:
		break;
	}
	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
static void
qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
    struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;
	struct scatterlist *sg;
	int index;
	int entry_count = 1;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	/* Update entry type to indicate a bidirectional command. */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
	/*
	 * Set the transfer direction; in this case both the read and write
	 * flags are set. Also set BD_WRAP_BACK: the firmware takes care of
	 * assigning DID=SID for the outgoing packets.
	 */
	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	cmd_pkt->control_flags =
	    __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
	    BD_WRAP_BACK);

	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);

	vha->bidi_stats.transfer_bytes += req_data_len;
	vha->bidi_stats.io_count++;
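	/*
	 * With BD_WRAP_BACK the firmware loops the frames back to this
	 * port, so the read and write byte counts are both set from the
	 * request payload length above.
	 */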
	/*
	 * Only one DSD is available inline in the bidirectional IOCB; the
	 * remaining DSDs are bundled into continuation IOCBs.
	 */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
	index = 0;

	for_each_sg(bsg_job->request_payload.sg_list, sg,
	    bsg_job->request_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/*
			 * A Continuation Type 1 IOCB can accommodate
			 * five DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
	/*
	 * DSDs for the read request always go into continuation IOCBs and
	 * follow the write DSDs. If there is room on the current IOCB they
	 * are added to it; otherwise a new continuation IOCB is allocated.
	 */
	for_each_sg(bsg_job->reply_payload.sg_list, sg,
	    bsg_job->reply_payload.sg_cnt, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets */
		if (avail_dsds == 0) {
			/*
			 * A Continuation Type 1 IOCB can accommodate
			 * five DSDs.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			entry_count++;
		}
		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}

	/* This must equal the number of IOCBs used for this command. */
	cmd_pkt->entry_count = entry_count;
}
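/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the ISP.
 * @sp: command to send to the ISP
 * @vha: host adapter the command is issued through
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY when no handle or
 * request-queue room is available, or EXT_STATUS_MAILBOX if the initial
 * marker IOCB cannot be sent.
 */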
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rsp = ha->rsp_q_map[0];
	req = vha->req;
	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}
	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}
	/* Calculate the number of IOCBs required. */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}
	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/*
	 * Zero out remaining portion of packet; this also leaves the tagged
	 * queuing modifier at its default, TSK_SIMPLE (0).
	 */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set the NPORT-ID to the vha's own (self-login) ID. */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	/* Bookkeeping for the queued command. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware. */
	wmb();
	qla2x00_start_iocbs(vha, req);

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}