/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation, version 2
 *  of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
        "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

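/*
 * Usage sketch (illustrative, not taken verbatim from the driver docs):
 * SLER can be requested at load time with
 *
 *      modprobe qla2xxx ql2xtgt_tape_enable=1
 *
 * or, since the parameter is declared S_IWUSR, toggled at runtime through
 * /sys/module/qla2xxx/parameters/ql2xtgt_tape_enable.
 */
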
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
        "Determines when initiator mode will be enabled. Possible values: "
        "\"exclusive\" - initiator mode will be enabled on load, "
        "disabled on enabling target mode and then on disabling target mode "
        "enabled back; "
        "\"disabled\" - initiator mode will never be enabled; "
        "\"enabled\" (default) - initiator mode will always stay enabled.");

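/*
 * Usage sketch: qlini_mode is S_IRUGO only, i.e. read-only at runtime, so
 * it has to be chosen at module load time, e.g.
 *
 *      modprobe qla2xxx qlini_mode=disabled
 *
 * for a pure target-mode configuration.
 */
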
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int temp_sam_status = SAM_STAT_BUSY;

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
        FCP_TMF_CMPL = 0,
        FCP_DATA_LEN_INVALID = 1,
        FCP_CMND_FIELDS_INVALID = 2,
        FCP_DATA_PARAM_MISMATCH = 3,
        FCP_TMF_REJECTED = 4,
        FCP_TMF_FAILED = 5,
        FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80 /* reserved bits in priority field */

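/*
 * Decoding sketch (task_codes is a hypothetical local holding the FCP_CMND
 * task attribute byte): the task attribute occupies the low three bits and
 * the priority sits above it, below the reserved bit, so
 *
 *      task_attr = task_codes & FCP_PTA_MASK;
 *      priority  = (task_codes & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;
 */
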
/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time when
 * those functions are called:
 *
 *  - Either the context is IRQ and only the IRQ handler can modify HW data,
 *    including rings related fields,
 *
 *  - Or access to target mode variables from struct qla_tgt doesn't
 *    cross those functions' boundaries, except tgt_stop, which is
 *    additionally protected by irq_cmd_count.
 */

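/*
 * In sketch form, a call site covered by the reasoning above looks like
 * this (illustrative only, not a verbatim excerpt):
 *
 *      spin_lock_irqsave(&ha->hardware_lock, flags);
 *      ...
 *      qlt_issue_marker(vha, 1);  (may drop and retake hardware_lock)
 *      ...
 *      spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */
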
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
        struct atio_from_isp *pkt);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
        int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
        *cmd, struct atio_from_isp *atio, int ha_locked);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
        struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
        struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
        struct atio_from_isp *atio, uint16_t status, int qfull);

static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
static struct qla_tgt_sess *qlt_find_sess_by_port_name(
        struct qla_tgt *tgt,
        const uint8_t *port_name)
{
        struct qla_tgt_sess *sess;

        list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
                if (!memcmp(sess->port_name, port_name, WWN_SIZE))
                        return sess;
        }

        return NULL;
}

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
        /* Send marker if required */
        if (unlikely(vha->marker_needed != 0)) {
                int rc = qla2x00_issue_marker(vha, vha_locked);
                if (rc != QLA_SUCCESS) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe03d,
                            "qla_target(%d): issue_marker() failed\n",
                            vha->vp_idx);
                }
                return rc;
        }
        return QLA_SUCCESS;
}

static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
        uint8_t *d_id)
{
        struct qla_hw_data *ha = vha->hw;
        uint8_t vp_idx;

        if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
                return NULL;

        if (vha->d_id.b.al_pa == d_id[2])
                return vha;

        BUG_ON(ha->tgt.tgt_vp_map == NULL);
        vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
        if (likely(test_bit(vp_idx, ha->vp_idx_map)))
                return ha->tgt.tgt_vp_map[vp_idx].vha;

        return NULL;
}

static inline
struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
        uint16_t vp_idx)
{
        struct qla_hw_data *ha = vha->hw;

        if (vha->vp_idx == vp_idx)
                return vha;

        BUG_ON(ha->tgt.tgt_vp_map == NULL);
        if (likely(test_bit(vp_idx, ha->vp_idx_map)))
                return ha->tgt.tgt_vp_map[vp_idx].vha;

        return NULL;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
        unsigned long flags;

        spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

        vha->hw->tgt.num_pend_cmds++;
        if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
                vha->hw->qla_stats.stat_max_pend_cmds =
                        vha->hw->tgt.num_pend_cmds;
        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
        unsigned long flags;

        spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
        vha->hw->tgt.num_pend_cmds--;
        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
        struct atio_from_isp *atio)
{
        ql_dbg(ql_dbg_tgt, vha, 0xe072,
            "%s: qla_target(%d): type %x ox_id %04x\n",
            __func__, vha->vp_idx, atio->u.raw.entry_type,
            be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
        {
                struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
                    atio->u.isp24.fcp_hdr.d_id);
                if (unlikely(NULL == host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe03e,
                            "qla_target(%d): Received ATIO_TYPE7 "
                            "with unknown d_id %x:%x:%x\n", vha->vp_idx,
                            atio->u.isp24.fcp_hdr.d_id[0],
                            atio->u.isp24.fcp_hdr.d_id[1],
                            atio->u.isp24.fcp_hdr.d_id[2]);
                        break;
                }
                qlt_24xx_atio_pkt(host, atio);
                break;
        }

        case IMMED_NOTIFY_TYPE:
        {
                struct scsi_qla_host *host = vha;
                struct imm_ntfy_from_isp *entry =
                    (struct imm_ntfy_from_isp *)atio;

                if ((entry->u.isp24.vp_index != 0xFF) &&
                    (entry->u.isp24.nport_handle != 0xFFFF)) {
                        host = qlt_find_host_by_vp_idx(vha,
                            entry->u.isp24.vp_index);
                        if (unlikely(!host)) {
                                ql_dbg(ql_dbg_tgt, vha, 0xe03f,
                                    "qla_target(%d): Received "
                                    "ATIO (IMMED_NOTIFY_TYPE) "
                                    "with unknown vp_index %d\n",
                                    vha->vp_idx, entry->u.isp24.vp_index);
                                break;
                        }
                }
                qlt_24xx_atio_pkt(host, atio);
                break;
        }

        default:
                ql_dbg(ql_dbg_tgt, vha, 0xe040,
                    "qla_target(%d): Received unknown ATIO atio "
                    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
                break;
        }

        return;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
{
        switch (pkt->entry_type) {
        case CTIO_CRC2:
                ql_dbg(ql_dbg_tgt, vha, 0xe073,
                    "qla_target(%d):%s: CRC2 Response pkt\n",
                    vha->vp_idx, __func__);
                /* fall through */
        case CTIO_TYPE7:
        {
                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
                    entry->vp_index);
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe041,
                            "qla_target(%d): Response pkt (CTIO_TYPE7) "
                            "received, with unknown vp_index %d\n",
                            vha->vp_idx, entry->vp_index);
                        break;
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        case IMMED_NOTIFY_TYPE:
        {
                struct scsi_qla_host *host = vha;
                struct imm_ntfy_from_isp *entry =
                    (struct imm_ntfy_from_isp *)pkt;

                host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe042,
                            "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
                            "received, with unknown vp_index %d\n",
                            vha->vp_idx, entry->u.isp24.vp_index);
                        break;
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        case NOTIFY_ACK_TYPE:
        {
                struct scsi_qla_host *host = vha;
                struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

                if (0xFF != entry->u.isp24.vp_index) {
                        host = qlt_find_host_by_vp_idx(vha,
                            entry->u.isp24.vp_index);
                        if (unlikely(!host)) {
                                ql_dbg(ql_dbg_tgt, vha, 0xe043,
                                    "qla_target(%d): Response "
                                    "pkt (NOTIFY_ACK_TYPE) "
                                    "received, with unknown "
                                    "vp_index %d\n", vha->vp_idx,
                                    entry->u.isp24.vp_index);
                                break;
                        }
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        case ABTS_RECV_24XX:
        {
                struct abts_recv_from_24xx *entry =
                    (struct abts_recv_from_24xx *)pkt;
                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
                    entry->vp_index);
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe044,
                            "qla_target(%d): Response pkt "
                            "(ABTS_RECV_24XX) received, with unknown "
                            "vp_index %d\n", vha->vp_idx, entry->vp_index);
                        break;
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        case ABTS_RESP_24XX:
        {
                struct abts_resp_to_24xx *entry =
                    (struct abts_resp_to_24xx *)pkt;
                struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
                    entry->vp_index);
                if (unlikely(!host)) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe045,
                            "qla_target(%d): Response pkt "
                            "(ABTS_RESP_24XX) received, with unknown "
                            "vp_index %d\n", vha->vp_idx, entry->vp_index);
                        break;
                }
                qlt_response_pkt(host, pkt);
                break;
        }

        default:
                qlt_response_pkt(vha, pkt);
                break;
        }
}

static void qlt_free_session_done(struct work_struct *work)
{
        struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
            free_work);
        struct qla_tgt *tgt = sess->tgt;
        struct scsi_qla_host *vha = sess->vha;
        struct qla_hw_data *ha = vha->hw;

        BUG_ON(!tgt);
        /*
         * Release the target session for FC Nexus from fabric module code.
         */
        if (sess->se_sess != NULL)
                ha->tgt.tgt_ops->free_session(sess);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
            "Unregistration of sess %p finished\n", sess);

        kfree(sess);
        /*
         * We need to protect against race, when tgt is freed before or
         * during wake_up()
         */
        tgt->sess_count--;
        if (tgt->sess_count == 0)
                wake_up_all(&tgt->waitQ);
}

/* ha->hardware_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
        struct scsi_qla_host *vha = sess->vha;

        vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

        list_del(&sess->sess_list_entry);
        if (sess->deleted)
                list_del(&sess->del_list_entry);

        INIT_WORK(&sess->free_work, qlt_free_session_done);
        schedule_work(&sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

/* ha->hardware_lock supposed to be held on entry */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess = NULL;
        uint32_t unpacked_lun, lun = 0;
        uint16_t loop_id;
        int res = 0;
        struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
        struct atio_from_isp *a = (struct atio_from_isp *)iocb;

        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
        if (loop_id == 0xFFFF) {
#if 0   /* FIXME: Re-enable Global event handling.. */
                /* Global event */
                atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
                qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
                if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
                        sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
                            typeof(*sess), sess_list_entry);
                        switch (mcmd) {
                        case QLA_TGT_NEXUS_LOSS_SESS:
                                mcmd = QLA_TGT_NEXUS_LOSS;
                                break;
                        case QLA_TGT_ABORT_ALL_SESS:
                                mcmd = QLA_TGT_ABORT_ALL;
                                break;
                        case QLA_TGT_NEXUS_LOSS:
                        case QLA_TGT_ABORT_ALL:
                                break;
                        default:
                                ql_dbg(ql_dbg_tgt, vha, 0xe046,
                                    "qla_target(%d): Not allowed "
                                    "command %x in %s", vha->vp_idx,
                                    mcmd, __func__);
                                sess = NULL;
                                break;
                        }
                } else
                        sess = NULL;
#endif
        } else {
                sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe000,
            "Using sess for qla_tgt_reset: %p\n", sess);
        if (!sess) {
                res = -ESRCH;
                return res;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe047,
            "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
            "loop_id %d)\n", vha->host_no, sess, sess->port_name,
            mcmd, loop_id);

        lun = a->u.isp24.fcp_cmnd.lun;
        unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

        return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
            iocb, QLA24XX_MGMT_SEND_NACK);
}

/* ha->hardware_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
        bool immediate)
{
        struct qla_tgt *tgt = sess->tgt;
        uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;

        if (sess->deleted)
                return;

        ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
            "Scheduling sess %p for deletion\n", sess);
        list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
        sess->deleted = 1;

        if (immediate)
                dev_loss_tmo = 0;

        sess->expires = jiffies + dev_loss_tmo * HZ;

        ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
            "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
            "deletion in %u secs (expires: %lu) immed: %d\n",
            sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
            sess->expires, immediate);

        if (immediate)
                schedule_delayed_work(&tgt->sess_del_work, 0);
        else
                schedule_delayed_work(&tgt->sess_del_work,
                    sess->expires - jiffies);
}

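/*
 * Worked example of the timing above (the retry count is picked purely
 * for illustration): with a port_down_retry_count of 30, a non-immediate
 * deletion is scheduled dev_loss_tmo = 30 + 5 = 35 seconds out, i.e.
 * sess->expires lands 35 * HZ jiffies in the future, while an immediate
 * request queues sess_del_work with no delay.
 */
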
/* ha->hardware_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
{
        struct qla_tgt_sess *sess;

        list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
                qlt_schedule_sess_for_deletion(sess, true);

        /* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
        uint16_t *loop_id)
{
        struct qla_hw_data *ha = vha->hw;
        dma_addr_t gid_list_dma;
        struct gid_list_info *gid_list;
        char *id_iter;
        int res, rc, i;
        uint16_t entries;

        gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
            &gid_list_dma, GFP_KERNEL);
        if (!gid_list) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
                    "qla_target(%d): DMA Alloc failed of %u\n",
                    vha->vp_idx, qla2x00_gid_list_size(ha));
                return -ENOMEM;
        }

        /* Get list of logged in devices */
        rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
        if (rc != QLA_SUCCESS) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
                    "qla_target(%d): get_id_list() failed: %x\n",
                    vha->vp_idx, rc);
                res = -1;
                goto out_free_id_list;
        }

        id_iter = (char *)gid_list;
        res = -1;
        for (i = 0; i < entries; i++) {
                struct gid_list_info *gid = (struct gid_list_info *)id_iter;
                if ((gid->al_pa == s_id[2]) &&
                    (gid->area == s_id[1]) &&
                    (gid->domain == s_id[0])) {
                        *loop_id = le16_to_cpu(gid->loop_id);
                        res = 0;
                        break;
                }
                id_iter += ha->gid_list_info_size;
        }

out_free_id_list:
        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
            gid_list, gid_list_dma);
        return res;
}

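/*
 * Note on byte order in the lookup above: the s_id passed in is laid out
 * as { domain, area, al_pa }, which is why s_id[0] is compared against
 * gid->domain, s_id[1] against gid->area, and s_id[2] against gid->al_pa.
 */
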
/* ha->hardware_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
        BUG_ON(!sess->deleted);

        list_del(&sess->del_list_entry);
        sess->deleted = 0;
}

static void qlt_del_sess_work_fn(struct delayed_work *work)
{
        struct qla_tgt *tgt = container_of(work, struct qla_tgt,
            sess_del_work);
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
        unsigned long flags, elapsed;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (!list_empty(&tgt->del_sess_list)) {
                sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
                    del_list_entry);
                elapsed = jiffies;
                if (time_after_eq(elapsed, sess->expires)) {
                        qlt_undelete_sess(sess);

                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
                            "Timeout: sess %p about to be deleted\n",
                            sess);
                        ha->tgt.tgt_ops->shutdown_sess(sess);
                        ha->tgt.tgt_ops->put_sess(sess);
                } else {
                        schedule_delayed_work(&tgt->sess_del_work,
                            sess->expires - elapsed);
                        break;
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct qla_tgt_sess *qlt_create_sess(
        struct scsi_qla_host *vha,
        fc_port_t *fcport,
        bool local)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
        unsigned long flags;
        unsigned char be_sid[3];

        /* Check to avoid double sessions */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
            sess_list_entry) {
                if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
                            "Double sess %p found (s_id %x:%x:%x, "
                            "loop_id %d), updating to d_id %x:%x:%x, "
                            "loop_id %d", sess, sess->s_id.b.domain,
                            sess->s_id.b.al_pa, sess->s_id.b.area,
                            sess->loop_id, fcport->d_id.b.domain,
                            fcport->d_id.b.al_pa, fcport->d_id.b.area,
                            fcport->loop_id);

                        if (sess->deleted)
                                qlt_undelete_sess(sess);

                        kref_get(&sess->se_sess->sess_kref);
                        ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
                            (fcport->flags & FCF_CONF_COMP_SUPPORTED));

                        if (sess->local && !local)
                                sess->local = 0;
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);

                        return sess;
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (!sess) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
                    "qla_target(%u): session allocation failed, all commands "
                    "from port %8phC will be refused", vha->vp_idx,
                    fcport->port_name);

                return NULL;
        }
        sess->tgt = vha->vha_tgt.qla_tgt;
        sess->vha = vha;
        sess->s_id = fcport->d_id;
        sess->loop_id = fcport->loop_id;
        sess->local = local;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
            "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
            sess, vha->vha_tgt.qla_tgt);

        be_sid[0] = sess->s_id.b.domain;
        be_sid[1] = sess->s_id.b.area;
        be_sid[2] = sess->s_id.b.al_pa;
        /*
         * Determine if this fc_port->port_name is allowed to access
         * target mode using explicit NodeACLs+MappedLUNs, or using
         * TPG demo mode. If this is successful a target mode FC nexus
         * is created.
         */
        if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
            &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
                kfree(sess);
                return NULL;
        }
        /*
         * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
         * access across ->hardware_lock reacquire.
         */
        kref_get(&sess->se_sess->sess_kref);

        sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
        BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
        memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

        spin_lock_irqsave(&ha->hardware_lock, flags);
        list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
        vha->vha_tgt.qla_tgt->sess_count++;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
            "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
            "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
            vha->vp_idx, local ? "local " : "", fcport->port_name,
            fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
            sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

        return sess;
}

/*
 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
        unsigned long flags;

        if (!vha->hw->tgt.tgt_ops)
                return;

        if (!tgt || (fcport->port_type != FCT_INITIATOR))
                return;

        if (qla_ini_mode_enabled(vha))
                return;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);

                mutex_lock(&vha->vha_tgt.tgt_mutex);
                sess = qlt_create_sess(vha, fcport, false);
                mutex_unlock(&vha->vha_tgt.tgt_mutex);

                spin_lock_irqsave(&ha->hardware_lock, flags);
        } else {
                kref_get(&sess->se_sess->sess_kref);

                if (sess->deleted) {
                        qlt_undelete_sess(sess);

                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
                            "qla_target(%u): %ssession for port %8phC "
                            "(loop ID %d) reappeared\n", vha->vp_idx,
                            sess->local ? "local " : "", sess->port_name,
                            sess->loop_id);

                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
                            "Reappeared sess %p\n", sess);
                }
                ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
                    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
        }

        if (sess && sess->local) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
                    "qla_target(%u): local session for "
                    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
                    fcport->port_name, sess->loop_id);
                sess->local = 0;
        }
        ha->tgt.tgt_ops->put_sess(sess);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
        unsigned long flags;

        if (!vha->hw->tgt.tgt_ops)
                return;

        if (!tgt || (fcport->port_type != FCT_INITIATOR))
                return;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

        sess->local = 1;
        qlt_schedule_sess_for_deletion(sess, false);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;
        int res;
        /*
         * We need to protect against race, when tgt is freed before or
         * during wake_up()
         */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
            "tgt %p, empty(sess_list)=%d sess_count=%d\n",
            tgt, list_empty(&tgt->sess_list), tgt->sess_count);
        res = (tgt->sess_count == 0);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return res;
}

/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;

        mutex_lock(&qla_tgt_mutex);
        if (!vha->fc_vport) {
                struct Scsi_Host *sh = vha->host;
                struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
                int npiv_vports;

                spin_lock_irqsave(sh->host_lock, flags);
                npiv_vports = (fc_host->npiv_vports_inuse);
                spin_unlock_irqrestore(sh->host_lock, flags);

                if (npiv_vports) {
                        mutex_unlock(&qla_tgt_mutex);
                        return -EPERM;
                }
        }
        if (tgt->tgt_stop || tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
                    "Already in tgt->tgt_stop or tgt_stopped state\n");
                mutex_unlock(&qla_tgt_mutex);
                return -EPERM;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
            vha->host_no, vha);
        /*
         * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
         * Lock is needed, because we still can get an incoming packet.
         */
        mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        tgt->tgt_stop = 1;
        qlt_clear_tgt_db(tgt, true);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
        mutex_unlock(&qla_tgt_mutex);

        flush_delayed_work(&tgt->sess_del_work);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
            "Waiting for sess works (tgt %p)", tgt);
        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        while (!list_empty(&tgt->sess_works_list)) {
                spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
                flush_scheduled_work();
                spin_lock_irqsave(&tgt->sess_work_lock, flags);
        }
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
            "Waiting for tgt %p: list_empty(sess_list)=%d "
            "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
            tgt->sess_count);

        wait_event(tgt->waitQ, test_tgt_sess_count(tgt));

        /* Big hammer */
        if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
                qlt_disable_vha(vha);

        /* Wait for sessions to clear out (just in case) */
        wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
        return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

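/*
 * Shutdown is split in two phases. A usage sketch of the sequence the
 * tcm_qla2xxx configfs code is expected to follow (error handling omitted
 * for brevity):
 *
 *      if (qlt_stop_phase1(tgt) == 0)   (drain sessions, disable the vha)
 *              qlt_stop_phase2(tgt);    (wait out in-flight IRQ commands)
 */
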
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
        struct qla_hw_data *ha = tgt->ha;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        unsigned long flags;

        if (tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
                    "Already in tgt->tgt_stopped state\n");
                dump_stack();
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
            "Waiting for %d IRQ commands to complete (tgt %p)",
            tgt->irq_cmd_count, tgt);

        mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (tgt->irq_cmd_count != 0) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                udelay(2);
                spin_lock_irqsave(&ha->hardware_lock, flags);
        }
        tgt->tgt_stop = 0;
        tgt->tgt_stopped = 1;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
            tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
        scsi_qla_host_t *vha = tgt->vha;

        if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
                qlt_stop_phase2(tgt);

        vha->vha_tgt.qla_tgt = NULL;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
            "Release of tgt %p finished\n", tgt);

        kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
        const void *param, unsigned int param_size)
{
        struct qla_tgt_sess_work_param *prm;
        unsigned long flags;

        prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
        if (!prm) {
                ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
                    "qla_target(%d): Unable to create session "
                    "work, command will be refused", 0);
                return -ENOMEM;
        }

        ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
            "Scheduling work (type %d, prm %p)"
            " to find session for param %p (size %d, tgt %p)\n",
            type, prm, param, param_size, tgt);

        prm->type = type;
        memcpy(&prm->tm_iocb, param, param_size);

        spin_lock_irqsave(&tgt->sess_work_lock, flags);
        list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

        schedule_work(&tgt->sess_work);

        return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_send_notify_ack(struct scsi_qla_host *vha,
        struct imm_ntfy_from_isp *ntfy,
        uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
        uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
        struct qla_hw_data *ha = vha->hw;
        request_t *pkt;
        struct nack_to_isp *nack;

        ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (!pkt) {
                ql_dbg(ql_dbg_tgt, vha, 0xe049,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", vha->vp_idx, __func__);
                return;
        }

        if (vha->vha_tgt.qla_tgt != NULL)
                vha->vha_tgt.qla_tgt->notify_ack_expected++;

        pkt->entry_type = NOTIFY_ACK_TYPE;
        pkt->entry_count = 1;

        nack = (struct nack_to_isp *)pkt;
        nack->ox_id = ntfy->ox_id;

        nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
        if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
                nack->u.isp24.flags = ntfy->u.isp24.flags &
                        __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
        }
        nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
        nack->u.isp24.status = ntfy->u.isp24.status;
        nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
        nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
        nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
        nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
        nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
        nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
        nack->u.isp24.srr_reject_code = srr_reject_code;
        nack->u.isp24.srr_reject_code_expl = srr_explan;
        nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

        ql_dbg(ql_dbg_tgt, vha, 0xe005,
            "qla_target(%d): Sending 24xx Notify Ack %d\n",
            vha->vp_idx, nack->u.isp24.status);

        /* Memory Barrier */
        wmb();
        qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, uint32_t status,
        bool ids_reversed)
{
        struct qla_hw_data *ha = vha->hw;
        struct abts_resp_to_24xx *resp;
        uint32_t f_ctl;
        uint8_t *p;

        ql_dbg(ql_dbg_tgt, vha, 0xe006,
            "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
            ha, abts, status);

        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
        if (!resp) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04a,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet", vha->vp_idx, __func__);
                return;
        }

        resp->entry_type = ABTS_RESP_24XX;
        resp->entry_count = 1;
        resp->nport_handle = abts->nport_handle;
        resp->vp_index = vha->vp_idx;
        resp->sof_type = abts->sof_type;
        resp->exchange_address = abts->exchange_address;
        resp->fcp_hdr_le = abts->fcp_hdr_le;
        f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
            F_CTL_LAST_SEQ | F_CTL_END_SEQ |
            F_CTL_SEQ_INITIATIVE);
        p = (uint8_t *)&f_ctl;
        resp->fcp_hdr_le.f_ctl[0] = *p++;
        resp->fcp_hdr_le.f_ctl[1] = *p++;
        resp->fcp_hdr_le.f_ctl[2] = *p;
        if (ids_reversed) {
                resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
                resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
                resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
                resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
                resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
                resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
        } else {
                resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
                resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
                resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
                resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
                resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
                resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
        }
        resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
        if (status == FCP_TMF_CMPL) {
                resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
                resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
                resp->payload.ba_acct.low_seq_cnt = 0x0000;
                resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
                resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
                resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
        } else {
                resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
                resp->payload.ba_rjt.reason_code =
                        BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
                /* Other bytes are zero */
        }

        vha->vha_tgt.qla_tgt->abts_resp_expected++;

        /* Memory Barrier */
        wmb();
        qla2x00_start_iocbs(vha, vha->req);
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
        struct abts_resp_from_24xx_fw *entry)
{
        struct ctio7_to_24xx *ctio;

        ql_dbg(ql_dbg_tgt, vha, 0xe007,
            "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
        /* Send marker if required */
        if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
                return;

        ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
        if (ctio == NULL) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04b,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", vha->vp_idx, __func__);
                return;
        }

        /*
         * We have received the firmware's response to an ABTS response
         * that we generated earlier, so the ID fields in it are reversed.
         */
        ctio->entry_type = CTIO_TYPE7;
        ctio->entry_count = 1;
        ctio->nport_handle = entry->nport_handle;
        ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
        ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
        ctio->vp_index = vha->vp_idx;
        ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
        ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
        ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
        ctio->exchange_addr = entry->exchange_addr_to_abort;
        ctio->u.status1.flags =
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_TERMINATE);
        ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);

        /* Memory Barrier */
        wmb();
        qla2x00_start_iocbs(vha, vha->req);

        qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
            FCP_TMF_CMPL, true);
}

/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
        struct qla_hw_data *ha = vha->hw;
        struct se_session *se_sess = sess->se_sess;
        struct qla_tgt_mgmt_cmd *mcmd;
        struct se_cmd *se_cmd;
        u32 lun = 0;
        int rc;
        bool found_lun = false;

        spin_lock(&se_sess->sess_cmd_lock);
        list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
                struct qla_tgt_cmd *cmd =
                        container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
                if (cmd->tag == abts->exchange_addr_to_abort) {
                        lun = cmd->unpacked_lun;
                        found_lun = true;
                        break;
                }
        }
        spin_unlock(&se_sess->sess_cmd_lock);

        if (!found_lun)
                return -ENOENT;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
            "qla_target(%d): task abort (tag=%d)\n",
            vha->vp_idx, abts->exchange_addr_to_abort);

        mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
        if (mcmd == NULL) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
                    "qla_target(%d): %s: Allocation of ABORT cmd failed",
                    vha->vp_idx, __func__);
                return -ENOMEM;
        }
        memset(mcmd, 0, sizeof(*mcmd));

        mcmd->sess = sess;
        memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
        mcmd->reset_count = vha->hw->chip_reset;

        rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
            abts->exchange_addr_to_abort);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
                    "qla_target(%d): tgt_ops->handle_tmr()"
                    " failed: %d", vha->vp_idx, rc);
                mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
                return -EFAULT;
        }

        return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts)
{
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
        uint32_t tag = abts->exchange_addr_to_abort;
        uint8_t s_id[3];
        int rc;

        if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
                    "qla_target(%d): ABTS: Abort Sequence not "
                    "supported\n", vha->vp_idx);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }

        if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
                    "qla_target(%d): ABTS: Unknown Exchange "
                    "Address received\n", vha->vp_idx);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
            "qla_target(%d): task abort (s_id=%x:%x:%x, "
            "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
            abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
            le32_to_cpu(abts->fcp_hdr_le.parameter));

        s_id[0] = abts->fcp_hdr_le.s_id[2];
        s_id[1] = abts->fcp_hdr_le.s_id[1];
        s_id[2] = abts->fcp_hdr_le.s_id[0];

        sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
        if (!sess) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
                    "qla_target(%d): task abort for non-existent session\n",
                    vha->vp_idx);
                rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
                    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
                if (rc != 0) {
                        qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
                            false);
                }
                return;
        }

        rc = __qlt_24xx_handle_abts(vha, abts, sess);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
                    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
                    vha->vp_idx, rc);
                qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
                return;
        }
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
        struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
        struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
        struct ctio7_to_24xx *ctio;
        uint16_t temp;

        ql_dbg(ql_dbg_tgt, ha, 0xe008,
            "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
            ha, atio, resp_code);

        /* Send marker if required */
        if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
                return;

        ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
        if (ctio == NULL) {
                ql_dbg(ql_dbg_tgt, ha, 0xe04c,
                    "qla_target(%d): %s failed: unable to allocate "
                    "request packet\n", ha->vp_idx, __func__);
                return;
        }

        ctio->entry_type = CTIO_TYPE7;
        ctio->entry_count = 1;
        ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
        ctio->nport_handle = mcmd->sess->loop_id;
        ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
        ctio->vp_index = ha->vp_idx;
        ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
        ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
        ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
        ctio->exchange_addr = atio->u.isp24.exchange_addr;
        ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_SEND_STATUS);
        temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
        ctio->u.status1.ox_id = cpu_to_le16(temp);
        ctio->u.status1.scsi_status =
            __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
        ctio->u.status1.response_len = __constant_cpu_to_le16(8);
        ctio->u.status1.sense_data[0] = resp_code;

        /* Memory Barrier */
        wmb();
        qla2x00_start_iocbs(ha, ha->req);
}

void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
        mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);

/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
        struct scsi_qla_host *vha = mcmd->sess->vha;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
            "TM response mcmd (%p) status %#x state %#x",
            mcmd, mcmd->fc_tm_rsp, mcmd->flags);

        spin_lock_irqsave(&ha->hardware_lock, flags);

        if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
                /*
                 * Either a chip reset is active or this request was from
                 * a previous life; just abort the processing.
                 */
                ql_dbg(ql_dbg_async, vha, 0xe100,
                    "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
                    qla2x00_reset_active(vha), mcmd->reset_count,
                    ha->chip_reset);
                ha->tgt.tgt_ops->free_mcmd(mcmd);
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }

        if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
                qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
                    0, 0, 0, 0, 0, 0);
        else {
                if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
                        qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
                            mcmd->fc_tm_rsp, false);
                else
                        qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
                            mcmd->fc_tm_rsp);
        }
        /*
         * Make the callback for ->free_mcmd() to queue_work() and invoke
         * target_put_sess_cmd() to drop cmd_kref to 1. The final
         * target_put_sess_cmd() call will be made from TFO->check_stop_free()
         * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
         * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
         * qlt_xmit_tm_rsp() returns here.
         */
        ha->tgt.tgt_ops->free_mcmd(mcmd);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);

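/*
 * Call-chain sketch for the TM response path, following the comment above:
 * core TCM invokes TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
 * qlt_xmit_tm_rsp(), which answers the initiator with either a notify ack,
 * an ABTS response, or a task management CTIO, depending on how the mcmd
 * was originally received.
 */
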
/* No locks */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
        struct qla_tgt_cmd *cmd = prm->cmd;

        BUG_ON(cmd->sg_cnt == 0);

        prm->sg = (struct scatterlist *)cmd->sg;
        prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
            cmd->sg_cnt, cmd->dma_data_direction);
        if (unlikely(prm->seg_cnt == 0))
                goto out_err;

        prm->cmd->sg_mapped = 1;

        if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
                /*
                 * If greater than four sg entries then we need to allocate
                 * the continuation entries
                 */
                if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
                        prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
                            prm->tgt->datasegs_per_cmd,
                            prm->tgt->datasegs_per_cont);
        } else {
                /* DIF */
                if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
                    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
                        prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
                        prm->tot_dsds = prm->seg_cnt;
                } else
                        prm->tot_dsds = prm->seg_cnt;

                if (cmd->prot_sg_cnt) {
                        prm->prot_sg = cmd->prot_sg;
                        prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
                            cmd->prot_sg, cmd->prot_sg_cnt,
                            cmd->dma_data_direction);
                        if (unlikely(prm->prot_seg_cnt == 0))
                                goto out_err;

                        if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
                            (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
                                /* DIF bundling is not supported here */
                                prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
                                    cmd->blk_sz);
                                prm->tot_dsds += prm->prot_seg_cnt;
                        } else
                                prm->tot_dsds += prm->prot_seg_cnt;
                }
        }

        return 0;

out_err:
        ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
            "qla_target(%d): PCI mapping failed: sg_cnt=%d",
            0, prm->cmd->sg_cnt);
        return -1;
}

static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
        struct qla_tgt_cmd *cmd)
{
        struct qla_hw_data *ha = vha->hw;

        BUG_ON(!cmd->sg_mapped);
        pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
        cmd->sg_mapped = 0;

        if (cmd->prot_sg_cnt)
                pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
                    cmd->dma_data_direction);

        if (cmd->ctx_dsd_alloced)
                qla2x00_clean_dsd_pool(ha, NULL, cmd);

        if (cmd->ctx)
                dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}

static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
        uint32_t req_cnt)
{
        uint32_t cnt, cnt_in;

        if (vha->req->cnt < (req_cnt + 2)) {
                cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
                cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);

                if (vha->req->ring_index < cnt)
                        vha->req->cnt = cnt - vha->req->ring_index;
                else
                        vha->req->cnt = vha->req->length -
                            (vha->req->ring_index - cnt);
        }

        if (unlikely(vha->req->cnt < (req_cnt + 2))) {
                ql_dbg(ql_dbg_io, vha, 0x305a,
                    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
                    vha->vp_idx, vha->req->ring_index,
                    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
                return -EAGAIN;
        }
        vha->req->cnt -= req_cnt;

        return 0;
}

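/*
 * Worked example of the free-slot math above (numbers picked purely for
 * illustration): with ring length 2048, ring_index 2000 and a hardware
 * out-pointer cnt of 100, the ring has wrapped, so the free count is
 * length - (ring_index - cnt) = 2048 - 1900 = 148 entries. The check uses
 * req_cnt + 2 rather than req_cnt to keep a small safety margin.
 */
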
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
{
        /* Adjust ring index. */
        vha->req->ring_index++;
        if (vha->req->ring_index == vha->req->length) {
                vha->req->ring_index = 0;
                vha->req->ring_ptr = vha->req->ring;
        } else {
                vha->req->ring_ptr++;
        }
        return (cont_entry_t *)vha->req->ring_ptr;
}

/* ha->hardware_lock supposed to be held on entry */
static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
{
        struct qla_hw_data *ha = vha->hw;
        uint32_t h;

        h = ha->tgt.current_handle;
        /* always increment cmd handle */
        do {
                ++h;
                if (h > DEFAULT_OUTSTANDING_COMMANDS)
                        h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
                if (h == ha->tgt.current_handle) {
                        ql_dbg(ql_dbg_io, vha, 0x305b,
                            "qla_target(%d): Ran out of "
                            "empty cmd slots in ha %p\n", vha->vp_idx, ha);
                        h = QLA_TGT_NULL_HANDLE;
                        break;
                }
        } while ((h == QLA_TGT_NULL_HANDLE) ||
            (h == QLA_TGT_SKIP_HANDLE) ||
            (ha->tgt.cmds[h-1] != NULL));

        if (h != QLA_TGT_NULL_HANDLE)
                ha->tgt.current_handle = h;

        return h;
}

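/*
 * Handle-space sketch: valid command handles cycle through
 * 1..DEFAULT_OUTSTANDING_COMMANDS, with 0 reserved as QLA_TGT_NULL_HANDLE
 * (QLA_TGT_SKIP_HANDLE is skipped as well), and handle h maps to
 * ha->tgt.cmds[h-1]. Wrapping all the way back to current_handle means
 * every slot is busy, and the caller sees QLA_TGT_NULL_HANDLE.
 */
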
/* ha->hardware_lock supposed to be held on entry */
static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
        struct scsi_qla_host *vha)
{
        uint32_t h;
        struct ctio7_to_24xx *pkt;
        struct qla_hw_data *ha = vha->hw;
        struct atio_from_isp *atio = &prm->cmd->atio;
        uint16_t temp;

        pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
        prm->pkt = pkt;
        memset(pkt, 0, sizeof(*pkt));

        pkt->entry_type = CTIO_TYPE7;
        pkt->entry_count = (uint8_t)prm->req_cnt;
        pkt->vp_index = vha->vp_idx;

        h = qlt_make_handle(vha);
        if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
                /*
                 * CTIO type 7 from the firmware doesn't provide a way to
                 * know the initiator's LOOP ID, hence we can't find
                 * the session and, so, the command.
                 */
                return -EAGAIN;
        } else
                ha->tgt.cmds[h-1] = prm->cmd;

        pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
        pkt->nport_handle = prm->cmd->loop_id;
        pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
        pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
        pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
        pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
        pkt->exchange_addr = atio->u.isp24.exchange_addr;
        pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
        temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
        pkt->u.status0.ox_id = cpu_to_le16(temp);
        pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

        return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
        struct scsi_qla_host *vha)
{
        int cnt;
        uint32_t *dword_ptr;
        int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;

        /* Build continuation packets */
        while (prm->seg_cnt > 0) {
                cont_a64_entry_t *cont_pkt64 =
                        (cont_a64_entry_t *)qlt_get_req_pkt(vha);

                /*
                 * Make sure that none of cont_pkt64's 64-bit-specific
                 * fields are used for 32-bit addressing; cast to
                 * (cont_entry_t *) for that.
                 */
                memset(cont_pkt64, 0, sizeof(*cont_pkt64));

                cont_pkt64->entry_count = 1;
                cont_pkt64->sys_define = 0;

                if (enable_64bit_addressing) {
                        cont_pkt64->entry_type = CONTINUE_A64_TYPE;
                        dword_ptr =
                            (uint32_t *)&cont_pkt64->dseg_0_address;
                } else {
                        cont_pkt64->entry_type = CONTINUE_TYPE;
                        dword_ptr =
                            (uint32_t *)&((cont_entry_t *)
                                cont_pkt64)->dseg_0_address;
                }

                /* Load continuation entry data segments */
                for (cnt = 0;
                    cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
                    cnt++, prm->seg_cnt--) {
                        *dword_ptr++ =
                            cpu_to_le32(pci_dma_lo32
                                (sg_dma_address(prm->sg)));
                        if (enable_64bit_addressing) {
                                *dword_ptr++ =
                                    cpu_to_le32(pci_dma_hi32
                                        (sg_dma_address(prm->sg)));
                        }
                        *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

                        prm->sg = sg_next(prm->sg);
                }
        }
}

/*
 * ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is sufficient amount of request entries to not drop it.
 */
static void qlt_load_data_segments(struct qla_tgt_prm *prm,
        struct scsi_qla_host *vha)
{
        int cnt;
        uint32_t *dword_ptr;
        int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
        struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;

        pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);

        /* Setup packet address segment pointer */
        dword_ptr = pkt24->u.status0.dseg_0_address;

        /* Set total data segment count */
        if (prm->seg_cnt)
                pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);

        if (prm->seg_cnt == 0) {
                /* No data transfer */
                *dword_ptr++ = 0;
                *dword_ptr = 0;
                return;
        }

        /* If scatter gather */

        /* Load command entry data segments */
        for (cnt = 0;
            (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
            cnt++, prm->seg_cnt--) {
                *dword_ptr++ =
                    cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
                if (enable_64bit_addressing) {
                        *dword_ptr++ =
                            cpu_to_le32(pci_dma_hi32(
                                sg_dma_address(prm->sg)));
                }
                *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));

                prm->sg = sg_next(prm->sg);
        }

        qlt_load_cont_data_segments(prm, vha);
}

static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
{
        return cmd->bufflen > 0;
}

/*
 * Called without ha->hardware_lock held
 */
static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
        struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
        uint32_t *full_req_cnt)
{
        struct qla_tgt *tgt = cmd->tgt;
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = vha->hw;
        struct se_cmd *se_cmd = &cmd->se_cmd;

        if (unlikely(cmd->aborted)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
                    "qla_target(%d): terminating exchange "
                    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
                    se_cmd, cmd->tag);

                cmd->state = QLA_TGT_STATE_ABORTED;
                cmd->cmd_flags |= BIT_6;

                qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);

                /* !! At this point cmd could be already freed !! */
                return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
        }

        prm->cmd = cmd;
        prm->tgt = tgt;
        prm->rq_result = scsi_status;
        prm->sense_buffer = &cmd->sense_buffer[0];
        prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
        prm->sg = NULL;
        prm->seg_cnt = -1;
        prm->req_cnt = 1;
        prm->add_status_pkt = 0;

        /* Send marker if required */
        if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
                return -EFAULT;

        if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
                if (qlt_pci_map_calc_cnt(prm) != 0)
                        return -EAGAIN;
        }

        *full_req_cnt = prm->req_cnt;

        if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                prm->residual = se_cmd->residual_count;
                ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
                    "Residual underflow: %d (tag %d, "
                    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
                    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
                    cmd->bufflen, prm->rq_result);
                prm->rq_result |= SS_RESIDUAL_UNDER;
        } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
                prm->residual = se_cmd->residual_count;
                ql_dbg(ql_dbg_io, vha, 0x305d,
                    "Residual overflow: %d (tag %d, "
                    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
                    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
                    cmd->bufflen, prm->rq_result);
                prm->rq_result |= SS_RESIDUAL_OVER;
        }

        if (xmit_type & QLA_TGT_XMIT_STATUS) {
                /*
                 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
                 * ignored in *xmit_response() below
                 */
                if (qlt_has_data(cmd)) {
                        if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
                            (IS_FWI2_CAPABLE(ha) &&
                            (prm->rq_result != 0))) {
                                prm->add_status_pkt = 1;
                                (*full_req_cnt)++;
                        }
                }
        }

        return 0;
}

static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
        struct qla_tgt_cmd *cmd, int sending_sense)
{
        if (ha->tgt.enable_class_2)
                return 0;

        if (sending_sense)
                return cmd->conf_compl_supported;
        else
                return ha->tgt.enable_explicit_conf &&
                    cmd->conf_compl_supported;
}

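/*
 * Decision table implied above: Class 2 service (enable_class_2) never
 * needs an explicit confirm; otherwise a CTIO carrying sense requires only
 * that the initiator advertised confirmed completion support, while a
 * plain status CTIO additionally requires enable_explicit_conf.
 */
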
#ifdef CONFIG_QLA_TGT_DEBUG_SRR
/*
 *  Original taken from the XFS code
 */
static unsigned long qlt_srr_random(void)
{
        static int Inited;
        static unsigned long RandomValue;
        static DEFINE_SPINLOCK(lock);
        /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
        register long rv;
        register long lo;
        register long hi;
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        if (!Inited) {
                RandomValue = jiffies;
                Inited = 1;
        }
        rv = RandomValue;
        hi = rv / 127773;
        lo = rv % 127773;
        rv = 16807 * lo - 2836 * hi;
        if (rv <= 0)
                rv += 2147483647;
        RandomValue = rv;
        spin_unlock_irqrestore(&lock, flags);
        return rv;
}

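/*
 * The recurrence above is the Park-Miller "minimal standard" generator
 * evaluated with Schrage's method: since 2^31 - 1 = 16807 * 127773 + 2836,
 * 16807 * lo - 2836 * hi equals (16807 * seed) mod (2^31 - 1) once any
 * negative intermediate value is folded back by adding 2147483647.
 */
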
static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{
#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
        if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
            == 50) {
                *xmit_type &= ~QLA_TGT_XMIT_STATUS;
                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
                    "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
        }
#endif
        /*
         * It's currently not possible to simulate SRRs for FCP_WRITE without
         * a physical link layer failure, so don't even try here..
         */
        if (cmd->dma_data_direction != DMA_FROM_DEVICE)
                return;

        if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
            ((qlt_srr_random() % 100) == 20)) {
                int i, leave = 0;
                unsigned int tot_len = 0;

                while (leave == 0)
                        leave = qlt_srr_random() % cmd->sg_cnt;

                for (i = 0; i < leave; i++)
                        tot_len += cmd->sg[i].length;

                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
                    "Cutting cmd %p (tag %d) buffer"
                    " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
                    " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
                    cmd->bufflen, cmd->sg_cnt);

                cmd->bufflen = tot_len;
                cmd->sg_cnt = leave;
        }

        if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
                unsigned int offset = qlt_srr_random() % cmd->bufflen;

                ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
                    "Cutting cmd %p (tag %d) buffer head "
                    "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
                    cmd->bufflen);
                if (offset == 0)
                        *xmit_type &= ~QLA_TGT_XMIT_DATA;
                else if (qlt_set_data_offset(cmd, offset)) {
                        ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
                            "qlt_set_data_offset() failed (tag %d)", cmd->tag);
                }
        }
}
#else
static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
{}
#endif

static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
        struct qla_tgt_prm *prm)
{
        prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
            (uint32_t)sizeof(ctio->u.status1.sense_data));
        ctio->u.status0.flags |=
            __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
        if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
                ctio->u.status0.flags |= __constant_cpu_to_le16(
                    CTIO7_FLAGS_EXPLICIT_CONFORM |
                    CTIO7_FLAGS_CONFORM_REQ);
        }
        ctio->u.status0.residual = cpu_to_le32(prm->residual);
        ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
        if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
                int i;

                if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
                        if (prm->cmd->se_cmd.scsi_status != 0) {
                                ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
                                    "Skipping EXPLICIT_CONFORM and "
                                    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
                                    "non GOOD status\n");
                                goto skip_explicit_conf;
                        }
                        ctio->u.status1.flags |= __constant_cpu_to_le16(
                            CTIO7_FLAGS_EXPLICIT_CONFORM |
                            CTIO7_FLAGS_CONFORM_REQ);
                }
skip_explicit_conf:
                ctio->u.status1.flags &=
                    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
                ctio->u.status1.flags |=
                    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
                ctio->u.status1.scsi_status |=
                    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
                ctio->u.status1.sense_length =
                    cpu_to_le16(prm->sense_buffer_len);
                for (i = 0; i < prm->sense_buffer_len/4; i++)
                        ((uint32_t *)ctio->u.status1.sense_data)[i] =
                            cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);

                if (unlikely((prm->sense_buffer_len % 4) != 0)) {
                        struct scsi_qla_host *vha = prm->cmd->vha;

                        ql_dbg(ql_dbg_tgt, vha, 0xe04f,
                            "qla_target(%d): %d bytes of sense "
                            "lost", prm->tgt->ha->vp_idx,
                            prm->sense_buffer_len % 4);
                }
        } else {
                ctio->u.status1.flags &=
                    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
                ctio->u.status1.flags |=
                    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
                ctio->u.status1.sense_length = 0;
                memset(ctio->u.status1.sense_data, 0,
                    sizeof(ctio->u.status1.sense_data));
        }

        /* Sense with len > 24, is it possible ??? */
}

static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
        /*
         * Uncomment when corresponding SCSI changes are done.
         *
         * if (!sp->cmd->prot_chk)
         *      return 0;
         */
        switch (se_cmd->prot_op) {
        case TARGET_PROT_DOUT_INSERT:
        case TARGET_PROT_DIN_STRIP:
                if (ql2xenablehba_err_chk >= 1)
                        return 1;
                break;
        case TARGET_PROT_DOUT_PASS:
        case TARGET_PROT_DIN_PASS:
                if (ql2xenablehba_err_chk >= 2)
                        return 1;
                break;
        case TARGET_PROT_DIN_INSERT:
        case TARGET_PROT_DOUT_STRIP:
                return 1;
        default:
                break;
        }
        return 0;
}

/*
 * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 * @sc: SCSI command
 */
static inline void
qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
{
        uint32_t lba = 0xffffffff & se_cmd->t_task_lba;

        /*
         * Wait till Mode Sense/Select cmd, modepage Ah, subpage 2
         * has been implemented by TCM, before AppTag is available.
         * Look for modesense_handlers[].
         */
        ctx->app_tag = 0;
        ctx->app_tag_mask[0] = 0x0;
        ctx->app_tag_mask[1] = 0x0;

        switch (se_cmd->prot_type) {
        case TARGET_DIF_TYPE0_PROT:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                ctx->ref_tag = cpu_to_le32(lba);

                if (!qlt_hba_err_chk_enabled(se_cmd))
                        break;

                /* enable ALL bytes of the ref tag */
                ctx->ref_tag_mask[0] = 0xff;
                ctx->ref_tag_mask[1] = 0xff;
                ctx->ref_tag_mask[2] = 0xff;
                ctx->ref_tag_mask[3] = 0xff;
                break;
        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case TARGET_DIF_TYPE1_PROT:
                ctx->ref_tag = cpu_to_le32(lba);

                if (!qlt_hba_err_chk_enabled(se_cmd))
                        break;

                /* enable ALL bytes of the ref tag */
                ctx->ref_tag_mask[0] = 0xff;
                ctx->ref_tag_mask[1] = 0xff;
                ctx->ref_tag_mask[2] = 0xff;
                ctx->ref_tag_mask[3] = 0xff;
                break;
        /*
         * For Type 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case TARGET_DIF_TYPE2_PROT:
                ctx->ref_tag = cpu_to_le32(lba);

                if (!qlt_hba_err_chk_enabled(se_cmd))
                        break;

                /* enable ALL bytes of the ref tag */
                ctx->ref_tag_mask[0] = 0xff;
                ctx->ref_tag_mask[1] = 0xff;
                ctx->ref_tag_mask[2] = 0xff;
                ctx->ref_tag_mask[3] = 0xff;
                break;
        /* For Type 3 protection: 16 bit GUARD only */
        case TARGET_DIF_TYPE3_PROT:
                ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
                        ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
                break;
        }
}

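/*
 * Summary of the DIF types handled above: Type 0 and Type 1 carry a 16-bit
 * guard plus a 32-bit reference tag checked against the LBA, Type 2 checks
 * the reference tag against the LBA in the CDB + N, and Type 3 checks the
 * 16-bit guard only, so its ref tag mask stays zero.
 */
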
2077 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
2081 uint32_t transfer_length = 0;
2082 uint32_t data_bytes;
2084 uint8_t bundling = 1;
2086 struct crc_context *crc_ctx_pkt = NULL;
2087 struct qla_hw_data *ha;
2088 struct ctio_crc2_to_fw *pkt;
2089 dma_addr_t crc_ctx_dma;
2090 uint16_t fw_prot_opts = 0;
2091 struct qla_tgt_cmd *cmd = prm->cmd;
2092 struct se_cmd *se_cmd = &cmd->se_cmd;
2094 struct atio_from_isp *atio = &prm->cmd->atio;
2100 pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
2102 memset(pkt, 0, sizeof(*pkt));
2104 ql_dbg(ql_dbg_tgt, vha, 0xe071,
2105 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
2106 vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
2107 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
2109 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
2110 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
2113 /* Compute dif len and adjust data len to incude protection */
2114 data_bytes = cmd->bufflen;
2115 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
2117 switch (se_cmd->prot_op) {
2118 case TARGET_PROT_DIN_INSERT:
2119 case TARGET_PROT_DOUT_STRIP:
2120 transfer_length = data_bytes;
2121 data_bytes += dif_bytes;
2124 case TARGET_PROT_DIN_STRIP:
2125 case TARGET_PROT_DOUT_INSERT:
2126 case TARGET_PROT_DIN_PASS:
2127 case TARGET_PROT_DOUT_PASS:
2128 transfer_length = data_bytes + dif_bytes;
2136 if (!qlt_hba_err_chk_enabled(se_cmd))
2137 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
2138 /* HBA error checking enabled */
2139 else if (IS_PI_UNINIT_CAPABLE(ha)) {
2140 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2141 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2142 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
2143 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2144 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2147 switch (se_cmd->prot_op) {
2148 case TARGET_PROT_DIN_INSERT:
2149 case TARGET_PROT_DOUT_INSERT:
2150 fw_prot_opts |= PO_MODE_DIF_INSERT;
2152 case TARGET_PROT_DIN_STRIP:
2153 case TARGET_PROT_DOUT_STRIP:
2154 fw_prot_opts |= PO_MODE_DIF_REMOVE;
2156 case TARGET_PROT_DIN_PASS:
2157 case TARGET_PROT_DOUT_PASS:
2158 fw_prot_opts |= PO_MODE_DIF_PASS;
2159 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
2161 default:/* Normal Request */
2162 fw_prot_opts |= PO_MODE_DIF_PASS;
2168 /* Update entry type to indicate Command Type CRC_2 IOCB */
2169 pkt->entry_type = CTIO_CRC2;
2170 pkt->entry_count = 1;
2171 pkt->vp_index = vha->vp_idx;
2173 h = qlt_make_handle(vha);
2174 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2176 * CTIO type 7 from the firmware doesn't provide a way to
2177 * know the initiator's LOOP ID, hence we can't find
2178 * the session and, so, the command.
2182 ha->tgt.cmds[h-1] = prm->cmd;
2185 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
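/*
* The handle returned in the CTIO completion carries marker bits:
* CTIO_COMPLETION_HANDLE_MARK flags it as a target-mode handle, and
* (h - 1) indexes the ha->tgt.cmds[] slot filled in above.
* qlt_ctio_to_cmd() strips these marks again on completion.
*/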
2186 pkt->nport_handle = prm->cmd->loop_id;
2187 pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2188 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2189 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2190 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2191 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2193 /* silence compiler warning */
2194 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2195 pkt->ox_id = cpu_to_le16(t16);
2197 t16 = (atio->u.isp24.attr << 9);
2198 pkt->flags |= cpu_to_le16(t16);
2199 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
2201 /* Set transfer direction */
2202 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2203 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
2204 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2205 pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
2208 pkt->dseg_count = prm->tot_dsds;
2209 /* Fibre channel byte count */
2210 pkt->transfer_length = cpu_to_le32(transfer_length);
2213 /* ----- CRC context -------- */
2215 /* Allocate CRC context from global pool */
2216 crc_ctx_pkt = cmd->ctx =
2217 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
2220 goto crc_queuing_error;
2222 /* Zero out CTX area. */
2223 clr_ptr = (uint8_t *)crc_ctx_pkt;
2224 memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
2226 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
2227 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
2230 crc_ctx_pkt->handle = pkt->handle;
2232 qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
2234 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
2235 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
2236 pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
2240 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
2243 * Configure bundling if we need to fetch interleaving
2244 * protection PCI accesses
2246 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
2247 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
2248 crc_ctx_pkt->u.bundling.dseg_count =
2249 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
2250 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
2253 /* Finish the common fields of CRC pkt */
2254 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
2255 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
2256 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
2257 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
2260 /* Walks data segments */
2261 pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
2263 if (!bundling && prm->prot_seg_cnt) {
2264 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
2265 prm->tot_dsds, cmd))
2266 goto crc_queuing_error;
2267 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
2268 (prm->tot_dsds - prm->prot_seg_cnt), cmd))
2269 goto crc_queuing_error;
2271 if (bundling && prm->prot_seg_cnt) {
2272 /* Walks dif segments */
2273 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
2275 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
2276 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
2277 prm->prot_seg_cnt, cmd))
2278 goto crc_queuing_error;
2283 /* Cleanup will be performed by the caller */
2285 return QLA_FUNCTION_FAILED;
2290 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
2291 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
2293 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2294 uint8_t scsi_status)
2296 struct scsi_qla_host *vha = cmd->vha;
2297 struct qla_hw_data *ha = vha->hw;
2298 struct ctio7_to_24xx *pkt;
2299 struct qla_tgt_prm prm;
2300 uint32_t full_req_cnt = 0;
2301 unsigned long flags = 0;
2304 memset(&prm, 0, sizeof(prm));
2305 qlt_check_srr_debug(cmd, &xmit_type);
2307 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2308 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
2309 (xmit_type & QLA_TGT_XMIT_STATUS) ?
2310 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
2313 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2315 if (unlikely(res != 0)) {
2316 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2322 spin_lock_irqsave(&ha->hardware_lock, flags);
2324 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2326 * Either a chip reset is active or this request was from
2327 * previous life, just abort the processing.
2329 cmd->state = QLA_TGT_STATE_PROCESSED;
2330 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2331 ql_dbg(ql_dbg_async, vha, 0xe101,
2332 "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
2333 qla2x00_reset_active(vha), cmd->reset_count,
2335 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2339 /* Does the F/W have enough IOCBs for this request? */
2340 res = qlt_check_reserve_free_req(vha, full_req_cnt);
2342 goto out_unmap_unlock;
2344 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
2345 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2347 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2348 if (unlikely(res != 0))
2349 goto out_unmap_unlock;
2352 pkt = (struct ctio7_to_24xx *)prm.pkt;
2354 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2355 pkt->u.status0.flags |=
2356 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2357 CTIO7_FLAGS_STATUS_MODE_0);
2359 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2360 qlt_load_data_segments(&prm, vha);
2362 if (prm.add_status_pkt == 0) {
2363 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2364 pkt->u.status0.scsi_status =
2365 cpu_to_le16(prm.rq_result);
2366 pkt->u.status0.residual =
2367 cpu_to_le32(prm.residual);
2368 pkt->u.status0.flags |= __constant_cpu_to_le16(
2369 CTIO7_FLAGS_SEND_STATUS);
2370 if (qlt_need_explicit_conf(ha, cmd, 0)) {
2371 pkt->u.status0.flags |=
2372 __constant_cpu_to_le16(
2373 CTIO7_FLAGS_EXPLICIT_CONFORM |
2374 CTIO7_FLAGS_CONFORM_REQ);
2380 * We have already made sure that there is a sufficient
2381 * number of request entries to not drop the HW lock in
2384 struct ctio7_to_24xx *ctio =
2385 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2387 ql_dbg(ql_dbg_io, vha, 0x305e,
2388 "Building additional status packet 0x%p.\n",
2392 * T10-DIF: ctio_crc2_to_fw overlays on top of ctio7_to_24xx
2395 memcpy(ctio, pkt, sizeof(*ctio));
2396 /* reset back to CTIO7 */
2397 ctio->entry_count = 1;
2398 ctio->entry_type = CTIO_TYPE7;
2399 ctio->dseg_count = 0;
2400 ctio->u.status1.flags &= ~__constant_cpu_to_le16(
2401 CTIO7_FLAGS_DATA_IN);
2403 /* Real finish is ctio_m1's finish */
2404 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2405 pkt->u.status0.flags |= __constant_cpu_to_le16(
2406 CTIO7_FLAGS_DONT_RET_CTIO);
2408 /* qlt_24xx_init_ctio_to_isp will correct
2409 * all necessary fields that are part of CTIO7.
2410 * There should be no residual CTIO-CRC2 data.
2412 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2414 pr_debug("Status CTIO7: %p\n", ctio);
2417 qlt_24xx_init_ctio_to_isp(pkt, &prm);
2420 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2421 cmd->cmd_sent_to_fw = 1;
2423 /* Memory Barrier */
2425 qla2x00_start_iocbs(vha, vha->req);
2426 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2432 qlt_unmap_sg(vha, cmd);
2433 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2437 EXPORT_SYMBOL(qlt_xmit_response);
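/*
* Illustrative caller sketch (not part of this file): a fabric module
* such as tcm_qla2xxx typically reaches this export from its
* queue_data_in/queue_status callbacks, roughly:
*
*	struct qla_tgt_cmd *cmd = container_of(se_cmd,
*			struct qla_tgt_cmd, se_cmd);
*
*	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA |
*			QLA_TGT_XMIT_STATUS, se_cmd->scsi_status);
*/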
2439 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2441 struct ctio7_to_24xx *pkt;
2442 struct scsi_qla_host *vha = cmd->vha;
2443 struct qla_hw_data *ha = vha->hw;
2444 struct qla_tgt *tgt = cmd->tgt;
2445 struct qla_tgt_prm prm;
2446 unsigned long flags;
2449 memset(&prm, 0, sizeof(prm));
2455 /* Send marker if required */
2456 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2459 /* Calculate number of entries and segments required */
2460 if (qlt_pci_map_calc_cnt(&prm) != 0)
2463 spin_lock_irqsave(&ha->hardware_lock, flags);
2465 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2467 * Either a chip reset is active or this request was from
2468 * previous life, just abort the processing.
2470 cmd->state = QLA_TGT_STATE_NEED_DATA;
2471 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2472 ql_dbg(ql_dbg_async, vha, 0xe102,
2473 "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
2474 qla2x00_reset_active(vha), cmd->reset_count,
2476 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2480 /* Does the F/W have enough IOCBs for this request? */
2481 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2483 goto out_unlock_free_unmap;
2484 if (cmd->se_cmd.prot_op)
2485 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2487 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2489 if (unlikely(res != 0))
2490 goto out_unlock_free_unmap;
2491 pkt = (struct ctio7_to_24xx *)prm.pkt;
2492 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2493 CTIO7_FLAGS_STATUS_MODE_0);
2495 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
2496 qlt_load_data_segments(&prm, vha);
2498 cmd->state = QLA_TGT_STATE_NEED_DATA;
2499 cmd->cmd_sent_to_fw = 1;
2501 /* Memory Barrier */
2503 qla2x00_start_iocbs(vha, vha->req);
2504 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2508 out_unlock_free_unmap:
2510 qlt_unmap_sg(vha, cmd);
2511 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2515 EXPORT_SYMBOL(qlt_rdy_to_xfer);
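/*
* Illustrative caller sketch (not part of this file): the fabric
* module's write_pending callback would typically refresh the SGL
* bookkeeping and then hand off to this export, roughly:
*
*	struct qla_tgt_cmd *cmd = container_of(se_cmd,
*			struct qla_tgt_cmd, se_cmd);
*
*	cmd->bufflen = se_cmd->data_length;
*	cmd->sg = se_cmd->t_data_sg;
*	cmd->sg_cnt = se_cmd->t_data_nents;
*	return qlt_rdy_to_xfer(cmd);
*/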
2519 * Checks the guard or meta-data for the type of error
2520 * detected by the HBA.
2523 qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2524 struct ctio_crc_from_fw *sts)
2526 uint8_t *ap = &sts->actual_dif[0];
2527 uint8_t *ep = &sts->expected_dif[0];
2528 uint32_t e_ref_tag, a_ref_tag;
2529 uint16_t e_app_tag, a_app_tag;
2530 uint16_t e_guard, a_guard;
2531 uint64_t lba = cmd->se_cmd.t_task_lba;
2533 a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
2534 a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
2535 a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
2537 e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
2538 e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
2539 e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
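/*
* The firmware reports each 8-byte DIF tuple in wire (big-endian)
* order, matching the T10-PI layout:
*
*	bytes 0-1: guard tag (16-bit CRC)
*	bytes 2-3: app tag
*	bytes 4-7: ref tag
*
* which is why the actual/expected values are picked apart with
* be16_to_cpu()/be32_to_cpu() above.
*/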
2541 ql_dbg(ql_dbg_tgt, vha, 0xe075,
2542 "iocb(s) %p Returned STATUS.\n", sts);
2544 ql_dbg(ql_dbg_tgt, vha, 0xf075,
2545 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2546 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2547 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2551 * For type 3: ref & app tags are all 'f's
2552 * For types 0, 1 and 2: app tag is all 'f's
2554 if ((a_app_tag == 0xffff) &&
2555 ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
2556 (a_ref_tag == 0xffffffff))) {
2557 uint32_t blocks_done;
2559 /* 2TB boundary case covered automatically with this */
2560 blocks_done = e_ref_tag - (uint32_t)lba + 1;
2561 cmd->se_cmd.bad_sector = e_ref_tag;
2562 cmd->se_cmd.pi_err = 0;
2563 ql_dbg(ql_dbg_tgt, vha, 0xf074,
2564 "need to return scsi good\n");
2566 /* Update protection tag */
2567 if (cmd->prot_sg_cnt) {
2568 uint32_t i, j = 0, k = 0, num_ent;
2569 struct scatterlist *sg, *sgl;
2574 /* Patch the corresponding protection tags */
2575 for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
2576 num_ent = sg_dma_len(sg) / 8;
2577 if (k + num_ent < blocks_done) {
2581 j = blocks_done - k - 1;
2586 if (k != blocks_done) {
2587 ql_log(ql_log_warn, vha, 0xf076,
2588 "unexpected tag values tag:lba=%u:%llu)\n",
2589 e_ref_tag, (unsigned long long)lba);
2594 struct sd_dif_tuple *spt;
2596 * This section came from the initiator. Is it valid here?
2597 * Should the ULP be overridden with the actual value?
2599 spt = page_address(sg_page(sg)) + sg->offset;
2602 spt->app_tag = 0xffff;
2603 if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
2604 spt->ref_tag = 0xffffffff;
2612 if (e_guard != a_guard) {
2613 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
2614 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2616 ql_log(ql_log_warn, vha, 0xe076,
2617 "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2618 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2619 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2620 a_guard, e_guard, cmd);
2625 if (e_ref_tag != a_ref_tag) {
2626 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
2627 cmd->se_cmd.bad_sector = e_ref_tag;
2629 ql_log(ql_log_warn, vha, 0xe077,
2630 "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2631 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2632 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2633 a_guard, e_guard, cmd);
2637 /* check app tag */
2638 if (e_app_tag != a_app_tag) {
2639 cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
2640 cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
2642 ql_log(ql_log_warn, vha, 0xe078,
2643 "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
2644 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2645 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
2646 a_guard, e_guard, cmd);
2654 /* If hardware_lock is held on entry, it might be dropped, then reacquired */
2655 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2656 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2657 struct qla_tgt_cmd *cmd,
2658 struct atio_from_isp *atio)
2660 struct ctio7_to_24xx *ctio24;
2661 struct qla_hw_data *ha = vha->hw;
2666 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2668 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2670 ql_dbg(ql_dbg_tgt, vha, 0xe050,
2671 "qla_target(%d): %s failed: unable to allocate "
2672 "request packet\n", vha->vp_idx, __func__);
2677 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
2678 ql_dbg(ql_dbg_tgt, vha, 0xe051,
2679 "qla_target(%d): Terminating cmd %p with "
2680 "incorrect state %d\n", vha->vp_idx, cmd,
2686 pkt->entry_count = 1;
2687 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2689 ctio24 = (struct ctio7_to_24xx *)pkt;
2690 ctio24->entry_type = CTIO_TYPE7;
2691 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
2692 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2693 ctio24->vp_index = vha->vp_idx;
2694 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2695 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2696 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2697 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
2698 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
2699 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
2700 CTIO7_FLAGS_TERMINATE);
2701 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2702 ctio24->u.status1.ox_id = cpu_to_le16(temp);
2704 /* Most likely, it isn't needed */
2705 ctio24->u.status1.residual = get_unaligned((uint32_t *)
2706 &atio->u.isp24.fcp_cmnd.add_cdb[
2707 atio->u.isp24.fcp_cmnd.add_cdb_len]);
2708 if (ctio24->u.status1.residual != 0)
2709 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
2711 /* Memory Barrier */
2713 qla2x00_start_iocbs(vha, vha->req);
2717 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2718 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2720 unsigned long flags;
2723 if (qlt_issue_marker(vha, ha_locked) < 0)
2727 rc = __qlt_send_term_exchange(vha, cmd, atio);
2729 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2732 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2733 rc = __qlt_send_term_exchange(vha, cmd, atio);
2735 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2736 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2739 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2740 !cmd->cmd_sent_to_fw)) {
2741 if (!ha_locked && !in_interrupt())
2742 msleep(250); /* just in case */
2745 qlt_unmap_sg(vha, cmd);
2746 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2751 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
2753 struct list_head free_list;
2754 struct qla_tgt_cmd *cmd, *tcmd;
2756 vha->hw->tgt.leak_exchg_thresh_hold =
2757 (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
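/*
* Example (illustrative, assuming a threshold percentage of 75): with
* fw_xcb_count == 2048 exchanges, the threshold becomes
* (2048 / 100) * 75 == 1500 leaked exchanges before
* qlt_chk_exch_leak_thresh_hold() below asks for a chip reset.
*/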
2760 if (!list_empty(&vha->hw->tgt.q_full_list)) {
2761 INIT_LIST_HEAD(&free_list);
2762 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
2764 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
2765 list_del(&cmd->cmd_list);
2766 /* This cmd was never sent to TCM. There is no need
2767 * to schedule free or call free_cmd
2770 vha->hw->tgt.num_qfull_cmds_alloc--;
2773 vha->hw->tgt.num_qfull_cmds_dropped = 0;
2776 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2778 uint32_t total_leaked;
2780 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
2782 if (vha->hw->tgt.leak_exchg_thresh_hold &&
2783 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
2785 ql_dbg(ql_dbg_tgt, vha, 0xe079,
2786 "Chip reset due to exchange starvation: %d/%d.\n",
2787 total_leaked, vha->hw->fw_xcb_count);
2789 if (IS_P3P_TYPE(vha->hw))
2790 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2792 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2793 qla2xxx_wake_dpc(vha);
2798 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2800 struct qla_tgt_sess *sess = cmd->sess;
2802 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
2803 "%s: se_cmd[%p] ox_id %04x\n",
2804 __func__, &cmd->se_cmd,
2805 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
2807 BUG_ON(cmd->cmd_in_wq);
2810 qlt_decr_num_pend_cmds(cmd->vha);
2812 BUG_ON(cmd->sg_mapped);
2813 cmd->jiffies_at_free = get_jiffies_64();
2814 if (unlikely(cmd->free_sg))
2817 if (!sess || !sess->se_sess) {
2821 cmd->jiffies_at_free = get_jiffies_64();
2822 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
2824 EXPORT_SYMBOL(qlt_free_cmd);
2826 /* ha->hardware_lock supposed to be held on entry */
2827 static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
2828 struct qla_tgt_cmd *cmd, void *ctio)
2830 struct qla_tgt_srr_ctio *sc;
2831 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2832 struct qla_tgt_srr_imm *imm;
2835 cmd->cmd_flags |= BIT_15;
2837 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
2838 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
2841 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
2842 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
2847 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2850 /* IRQ is already OFF */
2851 spin_lock(&tgt->srr_lock);
2852 sc->srr_id = tgt->ctio_srr_id;
2853 list_add_tail(&sc->srr_list_entry,
2854 &tgt->srr_ctio_list);
2855 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
2856 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
2857 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2859 list_for_each_entry(imm, &tgt->srr_imm_list,
2861 if (imm->srr_id == sc->srr_id) {
2867 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
2868 "Scheduling srr work\n");
2869 schedule_work(&tgt->srr_work);
2871 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
2872 "qla_target(%d): imm_srr_id "
2873 "== ctio_srr_id (%d), but there is no "
2874 "corresponding SRR IMM, deleting CTIO "
2875 "SRR %p\n", vha->vp_idx,
2876 tgt->ctio_srr_id, sc);
2877 list_del(&sc->srr_list_entry);
2878 spin_unlock(&tgt->srr_lock);
2884 spin_unlock(&tgt->srr_lock);
2886 struct qla_tgt_srr_imm *ti;
2888 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
2889 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
2891 spin_lock(&tgt->srr_lock);
2892 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2894 if (imm->srr_id == tgt->ctio_srr_id) {
2895 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
2896 "IMM SRR %p deleted (id %d)\n",
2898 list_del(&imm->srr_list_entry);
2899 qlt_reject_free_srr_imm(vha, imm, 1);
2902 spin_unlock(&tgt->srr_lock);
2911 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
2913 static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2914 struct qla_tgt_cmd *cmd, uint32_t status)
2919 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2921 __constant_cpu_to_le16(OF_TERM_EXCH));
2926 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2931 /* ha->hardware_lock supposed to be held on entry */
2932 static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2935 struct qla_hw_data *ha = vha->hw;
2938 if (ha->tgt.cmds[handle] != NULL) {
2939 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2940 ha->tgt.cmds[handle] = NULL;
2946 /* ha->hardware_lock supposed to be held on entry */
2947 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2948 uint32_t handle, void *ctio)
2950 struct qla_tgt_cmd *cmd = NULL;
2952 /* Clear out internal marks */
2953 handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
2954 CTIO_INTERMEDIATE_HANDLE_MARK);
2956 if (handle != QLA_TGT_NULL_HANDLE) {
2957 if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
2960 /* handle-1 is actually used */
2961 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
2962 ql_dbg(ql_dbg_tgt, vha, 0xe052,
2963 "qla_target(%d): Wrong handle %x received\n",
2964 vha->vp_idx, handle);
2967 cmd = qlt_get_cmd(vha, handle);
2968 if (unlikely(cmd == NULL)) {
2969 ql_dbg(ql_dbg_tgt, vha, 0xe053,
2970 "qla_target(%d): Suspicious: unable to "
2971 "find the command with handle %x\n", vha->vp_idx,
2975 } else if (ctio != NULL) {
2976 /* We can't get loop ID from CTIO7 */
2977 ql_dbg(ql_dbg_tgt, vha, 0xe054,
2978 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
2979 "support NULL handles\n", vha->vp_idx);
2986 /* hardware_lock should be held by caller. */
2988 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2990 struct qla_hw_data *ha = vha->hw;
2994 qlt_unmap_sg(vha, cmd);
2996 handle = qlt_make_handle(vha);
2998 /* TODO: fix debug message type and ids. */
2999 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3000 ql_dbg(ql_dbg_io, vha, 0xff00,
3001 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
3002 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3003 cmd->write_data_transferred = 0;
3004 cmd->state = QLA_TGT_STATE_DATA_IN;
3006 ql_dbg(ql_dbg_io, vha, 0xff01,
3007 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
3009 ha->tgt.tgt_ops->handle_data(cmd);
3011 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3012 ql_dbg(ql_dbg_io, vha, 0xff02,
3013 "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
3015 ql_dbg(ql_dbg_io, vha, 0xff03,
3016 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
3021 cmd->cmd_flags |= BIT_12;
3022 ha->tgt.tgt_ops->free_cmd(cmd);
3026 qlt_host_reset_handler(struct qla_hw_data *ha)
3028 struct qla_tgt_cmd *cmd;
3029 unsigned long flags;
3030 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3031 scsi_qla_host_t *vha = NULL;
3032 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
3035 if (!base_vha->hw->tgt.tgt_ops)
3038 if (!tgt || qla_ini_mode_enabled(base_vha)) {
3039 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
3040 "Target mode disabled\n");
3044 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
3045 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
3046 base_vha->dpc_flags);
3048 spin_lock_irqsave(&ha->hardware_lock, flags);
3049 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
3050 cmd = qlt_get_cmd(base_vha, i);
3053 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3055 qlt_abort_cmd_on_host_reset(vha, cmd);
3057 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3062 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
3064 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3065 uint32_t status, void *ctio)
3067 struct qla_hw_data *ha = vha->hw;
3068 struct se_cmd *se_cmd;
3069 struct target_core_fabric_ops *tfo;
3070 struct qla_tgt_cmd *cmd;
3072 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
3073 /* That could happen only in case of an error/reset/abort */
3074 if (status != CTIO_SUCCESS) {
3075 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3076 "Intermediate CTIO received"
3077 " (status %x)\n", status);
3082 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
3086 se_cmd = &cmd->se_cmd;
3087 tfo = se_cmd->se_tfo;
3088 cmd->cmd_sent_to_fw = 0;
3091 qlt_unmap_sg(vha, cmd);
3093 if (unlikely(status != CTIO_SUCCESS)) {
3094 switch (status & 0xFFFF) {
3095 case CTIO_LIP_RESET:
3096 case CTIO_TARGET_RESET:
3098 /* the driver requested an abort via Terminate exchange */
3100 case CTIO_INVALID_RX_ID:
3102 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
3103 "qla_target(%d): CTIO with "
3104 "status %#x received, state %x, se_cmd %p, "
3105 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
3106 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
3107 status, cmd->state, se_cmd);
3110 case CTIO_PORT_LOGGED_OUT:
3111 case CTIO_PORT_UNAVAILABLE:
3112 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3113 "qla_target(%d): CTIO with PORT LOGGED "
3114 "OUT (29) or PORT UNAVAILABLE (28) status %x "
3115 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3116 status, cmd->state, se_cmd);
3119 case CTIO_SRR_RECEIVED:
3120 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
3121 "qla_target(%d): CTIO with SRR_RECEIVED"
3122 " status %x received (state %x, se_cmd %p)\n",
3123 vha->vp_idx, status, cmd->state, se_cmd);
3124 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
3129 case CTIO_DIF_ERROR: {
3130 struct ctio_crc_from_fw *crc =
3131 (struct ctio_crc_from_fw *)ctio;
3132 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
3133 "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
3134 vha->vp_idx, status, cmd->state, se_cmd,
3135 *((u64 *)&crc->actual_dif[0]),
3136 *((u64 *)&crc->expected_dif[0]));
3138 if (qlt_handle_dif_error(vha, cmd, ctio)) {
3139 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3140 /* scsi Write/xfer rdy complete */
3143 /* scsi read/xmit response complete:
3144 * call handle_dif to send the scsi status
3145 * rather than terminate the exchange.
3147 cmd->state = QLA_TGT_STATE_PROCESSED;
3148 ha->tgt.tgt_ops->handle_dif_err(cmd);
3152 /* Need to generate a SCSI good completion,
3153 * because the FW did not send a scsi status.
3161 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
3162 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
3163 vha->vp_idx, status, cmd->state, se_cmd);
3168 /* "cmd->state == QLA_TGT_STATE_ABORTED" means
3169 * cmd is already aborted/terminated, we don't
3170 * need to terminate again. The exchange is already
3171 * cleaned up/freed at FW level. Just cleanup at driver
3174 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3175 (cmd->state != QLA_TGT_STATE_ABORTED)) {
3176 cmd->cmd_flags |= BIT_13;
3177 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3183 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3185 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3188 cmd->state = QLA_TGT_STATE_DATA_IN;
3190 if (unlikely(status != CTIO_SUCCESS))
3193 cmd->write_data_transferred = 1;
3195 ha->tgt.tgt_ops->handle_data(cmd);
3197 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3198 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3199 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
3201 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3202 "qla_target(%d): A command in state (%d) should "
3203 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
3206 if (unlikely(status != CTIO_SUCCESS) &&
3207 (cmd->state != QLA_TGT_STATE_ABORTED)) {
3208 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
3213 ha->tgt.tgt_ops->free_cmd(cmd);
3216 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3221 switch (task_codes) {
3222 case ATIO_SIMPLE_QUEUE:
3223 fcp_task_attr = MSG_SIMPLE_TAG;
3225 case ATIO_HEAD_OF_QUEUE:
3226 fcp_task_attr = MSG_HEAD_TAG;
3228 case ATIO_ORDERED_QUEUE:
3229 fcp_task_attr = MSG_ORDERED_TAG;
3231 case ATIO_ACA_QUEUE:
3232 fcp_task_attr = MSG_ACA_TAG;
3235 fcp_task_attr = MSG_SIMPLE_TAG;
3238 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3239 "qla_target: unknown task code %x, use ORDERED instead\n",
3241 fcp_task_attr = MSG_ORDERED_TAG;
3245 return fcp_task_attr;
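/*
* Summary of the mapping above: the FCP task attribute from the ATIO
* is translated to the corresponding target-core code (SIMPLE, HEAD OF
* QUEUE, ORDERED, ACA); unknown task codes are logged and degraded to
* MSG_ORDERED_TAG.
*/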
3248 static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
3251 * Process context for I/O path into tcm_qla2xxx code
3253 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3255 scsi_qla_host_t *vha = cmd->vha;
3256 struct qla_hw_data *ha = vha->hw;
3257 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3258 struct qla_tgt_sess *sess = cmd->sess;
3259 struct atio_from_isp *atio = &cmd->atio;
3261 unsigned long flags;
3262 uint32_t data_length;
3263 int ret, fcp_task_attr, data_dir, bidi = 0;
3266 cmd->cmd_flags |= BIT_1;
3270 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3271 cmd->tag = atio->u.isp24.exchange_addr;
3272 cmd->unpacked_lun = scsilun_to_int(
3273 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
3275 if (atio->u.isp24.fcp_cmnd.rddata &&
3276 atio->u.isp24.fcp_cmnd.wrdata) {
3278 data_dir = DMA_TO_DEVICE;
3279 } else if (atio->u.isp24.fcp_cmnd.rddata)
3280 data_dir = DMA_FROM_DEVICE;
3281 else if (atio->u.isp24.fcp_cmnd.wrdata)
3282 data_dir = DMA_TO_DEVICE;
3284 data_dir = DMA_NONE;
3286 fcp_task_attr = qlt_get_fcp_task_attr(vha,
3287 atio->u.isp24.fcp_cmnd.task_attr);
3288 data_length = be32_to_cpu(get_unaligned((uint32_t *)
3289 &atio->u.isp24.fcp_cmnd.add_cdb[
3290 atio->u.isp24.fcp_cmnd.add_cdb_len]));
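/*
* In the FCP_CMND IU the 32-bit DATA LENGTH field follows the
* variable-length additional CDB bytes, hence the addressing as
* add_cdb[add_cdb_len] and the byte swap from wire (big-endian) order.
*/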
3292 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3293 fcp_task_attr, data_dir, bidi);
3297 * Drop the extra session reference taken in qlt_handle_cmd_for_atio().
3299 spin_lock_irqsave(&ha->hardware_lock, flags);
3300 ha->tgt.tgt_ops->put_sess(sess);
3301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3305 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
3307 * cmd has not been sent to the target yet, so pass NULL as the second
3308 * argument to qlt_send_term_exchange() and free the memory here.
3310 cmd->cmd_flags |= BIT_2;
3311 spin_lock_irqsave(&ha->hardware_lock, flags);
3312 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
3314 qlt_decr_num_pend_cmds(vha);
3315 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3316 ha->tgt.tgt_ops->put_sess(sess);
3317 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3320 static void qlt_do_work(struct work_struct *work)
3322 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3327 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3328 struct qla_tgt_sess *sess,
3329 struct atio_from_isp *atio)
3331 struct se_session *se_sess = sess->se_sess;
3332 struct qla_tgt_cmd *cmd;
3335 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
3339 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
3340 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
3342 memcpy(&cmd->atio, atio, sizeof(*atio));
3343 cmd->state = QLA_TGT_STATE_NEW;
3344 cmd->tgt = vha->vha_tgt.qla_tgt;
3345 qlt_incr_num_pend_cmds(vha);
3347 cmd->se_cmd.map_tag = tag;
3349 cmd->loop_id = sess->loop_id;
3350 cmd->conf_compl_supported = sess->conf_compl_supported;
3355 static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
3358 static void qlt_create_sess_from_atio(struct work_struct *work)
3360 struct qla_tgt_sess_op *op = container_of(work,
3361 struct qla_tgt_sess_op, work);
3362 scsi_qla_host_t *vha = op->vha;
3363 struct qla_hw_data *ha = vha->hw;
3364 struct qla_tgt_sess *sess;
3365 struct qla_tgt_cmd *cmd;
3366 unsigned long flags;
3367 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3369 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3370 "qla_target(%d): Unable to find wwn login"
3371 " (s_id %x:%x:%x), trying to create it manually\n",
3372 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3374 if (op->atio.u.raw.entry_count > 1) {
3375 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3376 "Dropping multy entry atio %p\n", &op->atio);
3380 mutex_lock(&vha->vha_tgt.tgt_mutex);
3381 sess = qlt_make_local_sess(vha, s_id);
3382 /* sess has an extra creation ref. */
3383 mutex_unlock(&vha->vha_tgt.tgt_mutex);
3388 * Now obtain a pre-allocated session tag using the original op->atio
3389 * packet header, and dispatch into __qlt_do_work() using the existing
3392 cmd = qlt_get_tag(vha, sess, &op->atio);
3394 spin_lock_irqsave(&ha->hardware_lock, flags);
3395 qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
3396 ha->tgt.tgt_ops->put_sess(sess);
3397 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3402 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
3403 * the extra reference taken above by qlt_make_local_sess()
3410 spin_lock_irqsave(&ha->hardware_lock, flags);
3411 qlt_send_term_exchange(vha, NULL, &op->atio, 1);
3412 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3417 /* ha->hardware_lock supposed to be held on entry */
3418 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3419 struct atio_from_isp *atio)
3421 struct qla_hw_data *ha = vha->hw;
3422 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3423 struct qla_tgt_sess *sess;
3424 struct qla_tgt_cmd *cmd;
3426 if (unlikely(tgt->tgt_stop)) {
3427 ql_dbg(ql_dbg_io, vha, 0x3061,
3428 "New command while device %p is shutting down\n", tgt);
3432 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
3433 if (unlikely(!sess)) {
3434 struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
3439 memcpy(&op->atio, atio, sizeof(*atio));
3441 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3442 queue_work(qla_tgt_wq, &op->work);
3446 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3448 kref_get(&sess->se_sess->sess_kref);
3450 cmd = qlt_get_tag(vha, sess, atio);
3452 ql_dbg(ql_dbg_io, vha, 0x3062,
3453 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
3454 ha->tgt.tgt_ops->put_sess(sess);
3459 cmd->jiffies_at_alloc = get_jiffies_64();
3461 cmd->reset_count = vha->hw->chip_reset;
3464 cmd->cmd_flags |= BIT_0;
3465 INIT_WORK(&cmd->work, qlt_do_work);
3466 queue_work(qla_tgt_wq, &cmd->work);
3471 /* ha->hardware_lock supposed to be held on entry */
3472 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3473 int fn, void *iocb, int flags)
3475 struct scsi_qla_host *vha = sess->vha;
3476 struct qla_hw_data *ha = vha->hw;
3477 struct qla_tgt_mgmt_cmd *mcmd;
3481 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
3483 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
3484 "qla_target(%d): Allocation of management "
3485 "command failed, some commands and their data could "
3486 "leak\n", vha->vp_idx);
3489 memset(mcmd, 0, sizeof(*mcmd));
3493 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
3494 sizeof(mcmd->orig_iocb.imm_ntfy));
3496 mcmd->tmr_func = fn;
3497 mcmd->flags = flags;
3498 mcmd->reset_count = vha->hw->chip_reset;
3501 case QLA_TGT_CLEAR_ACA:
3502 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
3503 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
3504 tmr_func = TMR_CLEAR_ACA;
3507 case QLA_TGT_TARGET_RESET:
3508 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
3509 "qla_target(%d): TARGET_RESET received\n",
3511 tmr_func = TMR_TARGET_WARM_RESET;
3514 case QLA_TGT_LUN_RESET:
3515 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3516 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3517 tmr_func = TMR_LUN_RESET;
3520 case QLA_TGT_CLEAR_TS:
3521 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
3522 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
3523 tmr_func = TMR_CLEAR_TASK_SET;
3526 case QLA_TGT_ABORT_TS:
3527 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
3528 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
3529 tmr_func = TMR_ABORT_TASK_SET;
3532 case QLA_TGT_ABORT_ALL:
3533 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
3534 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
3539 case QLA_TGT_ABORT_ALL_SESS:
3540 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
3541 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
3546 case QLA_TGT_NEXUS_LOSS_SESS:
3547 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
3548 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
3553 case QLA_TGT_NEXUS_LOSS:
3554 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
3555 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
3560 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
3561 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
3562 sess->vha->vp_idx, fn);
3563 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3567 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
3569 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
3570 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
3571 sess->vha->vp_idx, res);
3572 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3579 /* ha->hardware_lock supposed to be held on entry */
3580 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3582 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3583 struct qla_hw_data *ha = vha->hw;
3584 struct qla_tgt *tgt;
3585 struct qla_tgt_sess *sess;
3586 uint32_t lun, unpacked_lun;
3589 tgt = vha->vha_tgt.qla_tgt;
3591 lun = a->u.isp24.fcp_cmnd.lun;
3592 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
3593 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
3594 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3595 a->u.isp24.fcp_hdr.s_id);
3596 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3599 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
3600 "qla_target(%d): task mgmt fn 0x%x for "
3601 "non-existant session\n", vha->vp_idx, fn);
3602 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
3603 sizeof(struct atio_from_isp));
3606 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3609 /* ha->hardware_lock supposed to be held on entry */
3610 static int __qlt_abort_task(struct scsi_qla_host *vha,
3611 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
3613 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3614 struct qla_hw_data *ha = vha->hw;
3615 struct qla_tgt_mgmt_cmd *mcmd;
3616 uint32_t lun, unpacked_lun;
3619 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
3621 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
3622 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
3623 vha->vp_idx, __func__);
3626 memset(mcmd, 0, sizeof(*mcmd));
3629 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
3630 sizeof(mcmd->orig_iocb.imm_ntfy));
3632 lun = a->u.isp24.fcp_cmnd.lun;
3633 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3634 mcmd->reset_count = vha->hw->chip_reset;
3636 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
3637 le16_to_cpu(iocb->u.isp2x.seq_id));
3639 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
3640 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
3642 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
3649 /* ha->hardware_lock supposed to be held on entry */
3650 static int qlt_abort_task(struct scsi_qla_host *vha,
3651 struct imm_ntfy_from_isp *iocb)
3653 struct qla_hw_data *ha = vha->hw;
3654 struct qla_tgt_sess *sess;
3657 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
3659 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
3661 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
3662 "qla_target(%d): task abort for unexisting "
3663 "session\n", vha->vp_idx);
3664 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
3665 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
3668 return __qlt_abort_task(vha, iocb, sess);
3672 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
3674 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3675 struct imm_ntfy_from_isp *iocb)
3679 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
3680 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3681 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
3683 switch (iocb->u.isp24.status_subcode) {
3689 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3694 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3695 if (tgt->link_reinit_iocb_pending) {
3696 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
3698 tgt->link_reinit_iocb_pending = 0;
3700 res = 1; /* send notify ack */
3705 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3706 "qla_target(%d): Unsupported ELS command %x "
3707 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
3708 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3715 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
3717 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
3718 size_t first_offset = 0, rem_offset = offset, tmp = 0;
3719 int i, sg_srr_cnt, bufflen = 0;
3721 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
3722 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
3723 "cmd->sg_cnt: %u, direction: %d\n",
3724 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
3727 * FIXME: Reject non zero SRR relative offset until we can test
3728 * this code properly.
3730 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
3733 if (!cmd->sg || !cmd->sg_cnt) {
3734 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
3735 "Missing cmd->sg or zero cmd->sg_cnt in"
3736 " qla_tgt_set_data_offset\n");
3740 * Walk the current cmd->sg list until we locate the new sg_srr_start
3742 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
3743 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
3744 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
3745 i, sg, sg_page(sg), sg->length, sg->offset);
3747 if ((sg->length + tmp) > offset) {
3748 first_offset = rem_offset;
3750 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
3751 "Found matching sg[%d], using %p as sg_srr_start, "
3752 "and using first_offset: %zu\n", i, sg,
3757 rem_offset -= sg->length;
3760 if (!sg_srr_start) {
3761 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
3762 "Unable to locate sg_srr_start for offset: %u\n", offset);
3765 sg_srr_cnt = (cmd->sg_cnt - i);
3767 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
3769 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
3770 "Unable to allocate sgp\n");
3773 sg_init_table(sg_srr, sg_srr_cnt);
3776 * Walk the remaining list for sg_srr_start, mapping to the newly
3777 * allocated sg_srr taking first_offset into account.
3779 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
3781 sg_set_page(sgp, sg_page(sg),
3782 (sg->length - first_offset), first_offset);
3785 sg_set_page(sgp, sg_page(sg), sg->length, 0);
3787 bufflen += sgp->length;
3795 cmd->sg_cnt = sg_srr_cnt;
3796 cmd->bufflen = bufflen;
3797 cmd->offset += offset;
3800 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
3801 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
3803 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
3805 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
3808 if (cmd->sg_cnt < 0)
3811 if (cmd->bufflen < 0)
3817 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
3818 uint32_t srr_rel_offs, int *xmit_type)
3820 int res = 0, rel_offs;
3822 rel_offs = srr_rel_offs - cmd->offset;
3823 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3824 srr_rel_offs, rel_offs);
3826 *xmit_type = QLA_TGT_XMIT_ALL;
3829 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
3830 "qla_target(%d): SRR rel_offs (%d) < 0",
3831 cmd->vha->vp_idx, rel_offs);
3833 } else if (rel_offs == cmd->bufflen)
3834 *xmit_type = QLA_TGT_XMIT_STATUS;
3835 else if (rel_offs > 0)
3836 res = qlt_set_data_offset(cmd, rel_offs);
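/*
* Worked example (illustrative): if the command started at
* cmd->offset == 0 and the initiator requests SRR at relative offset
* 4096, rel_offs == 4096 and qlt_set_data_offset() advances the SGL by
* that many bytes (currently rejected by the FIXME there), while
* rel_offs == cmd->bufflen means only the status needs resending.
*/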
3841 /* No locks, thread context */
3842 static void qlt_handle_srr(struct scsi_qla_host *vha,
3843 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
3845 struct imm_ntfy_from_isp *ntfy =
3846 (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
3847 struct qla_hw_data *ha = vha->hw;
3848 struct qla_tgt_cmd *cmd = sctio->cmd;
3849 struct se_cmd *se_cmd = &cmd->se_cmd;
3850 unsigned long flags;
3851 int xmit_type = 0, resp = 0;
3855 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
3856 srr_ui = ntfy->u.isp24.srr_ui;
3858 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
3863 spin_lock_irqsave(&ha->hardware_lock, flags);
3864 qlt_send_notify_ack(vha, ntfy,
3865 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3866 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3867 xmit_type = QLA_TGT_XMIT_STATUS;
3870 case SRR_IU_DATA_IN:
3871 if (!cmd->sg || !cmd->sg_cnt) {
3872 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
3873 "Unable to process SRR_IU_DATA_IN due to"
3874 " missing cmd->sg, state: %d\n", cmd->state);
3878 if (se_cmd->scsi_status != 0) {
3879 ql_dbg(ql_dbg_tgt, vha, 0xe02a,
3880 "Rejecting SRR_IU_DATA_IN with non GOOD "
3884 cmd->bufflen = se_cmd->data_length;
3886 if (qlt_has_data(cmd)) {
3887 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3889 spin_lock_irqsave(&ha->hardware_lock, flags);
3890 qlt_send_notify_ack(vha, ntfy,
3891 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3892 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3895 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
3896 "qla_target(%d): SRR for in data for cmd "
3897 "without them (tag %d, SCSI status %d), "
3898 "reject", vha->vp_idx, cmd->tag,
3899 cmd->se_cmd.scsi_status);
3903 case SRR_IU_DATA_OUT:
3904 if (!cmd->sg || !cmd->sg_cnt) {
3905 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
3906 "Unable to process SRR_IU_DATA_OUT due to"
3907 " missing cmd->sg\n");
3911 if (se_cmd->scsi_status != 0) {
3912 ql_dbg(ql_dbg_tgt, vha, 0xe02b,
3913 "Rejecting SRR_IU_DATA_OUT"
3914 " with non GOOD scsi_status\n");
3917 cmd->bufflen = se_cmd->data_length;
3919 if (qlt_has_data(cmd)) {
3920 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3922 spin_lock_irqsave(&ha->hardware_lock, flags);
3923 qlt_send_notify_ack(vha, ntfy,
3924 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3925 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3926 if (xmit_type & QLA_TGT_XMIT_DATA) {
3927 cmd->cmd_flags |= BIT_8;
3928 qlt_rdy_to_xfer(cmd);
3931 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
3932 "qla_target(%d): SRR for out data for cmd "
3933 "without them (tag %d, SCSI status %d), "
3934 "reject", vha->vp_idx, cmd->tag,
3935 cmd->se_cmd.scsi_status);
3940 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
3941 "qla_target(%d): Unknown srr_ui value %x",
3942 vha->vp_idx, srr_ui);
3946 /* Transmit the response for the status and data-in cases */
3948 cmd->cmd_flags |= BIT_7;
3949 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
3955 spin_lock_irqsave(&ha->hardware_lock, flags);
3956 qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
3957 NOTIFY_ACK_SRR_FLAGS_REJECT,
3958 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3959 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3960 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3961 cmd->state = QLA_TGT_STATE_DATA_IN;
3964 cmd->cmd_flags |= BIT_9;
3965 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
3967 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3970 static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
3971 struct qla_tgt_srr_imm *imm, int ha_locked)
3973 struct qla_hw_data *ha = vha->hw;
3974 unsigned long flags = 0;
3977 spin_lock_irqsave(&ha->hardware_lock, flags);
3979 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
3980 NOTIFY_ACK_SRR_FLAGS_REJECT,
3981 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3982 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3985 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3990 static void qlt_handle_srr_work(struct work_struct *work)
3992 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
3993 struct scsi_qla_host *vha = tgt->vha;
3994 struct qla_tgt_srr_ctio *sctio;
3995 unsigned long flags;
3997 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
4001 spin_lock_irqsave(&tgt->srr_lock, flags);
4002 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
4003 struct qla_tgt_srr_imm *imm, *i, *ti;
4004 struct qla_tgt_cmd *cmd;
4005 struct se_cmd *se_cmd;
4008 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
4010 if (i->srr_id == sctio->srr_id) {
4011 list_del(&i->srr_list_entry);
4013 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
4014 "qla_target(%d): There must be "
4015 "only one IMM SRR per CTIO SRR "
4016 "(IMM SRR %p, id %d, CTIO %p\n",
4017 vha->vp_idx, i, i->srr_id, sctio);
4018 qlt_reject_free_srr_imm(tgt->vha, i, 0);
4024 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
4025 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
4029 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
4030 "Not found matching IMM for SRR CTIO (id %d)\n",
4034 list_del(&sctio->srr_list_entry);
4036 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4040 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
4041 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
4050 se_cmd = &cmd->se_cmd;
4052 cmd->sg_cnt = se_cmd->t_data_nents;
4053 cmd->sg = se_cmd->t_data_sg;
4055 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
4056 "SRR cmd %p (se_cmd %p, tag %d, op %x), "
4057 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
4058 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
4059 cmd->sg_cnt, cmd->offset);
4061 qlt_handle_srr(vha, sctio, imm);
4067 spin_unlock_irqrestore(&tgt->srr_lock, flags);
4070 /* ha->hardware_lock supposed to be held on entry */
4071 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4072 struct imm_ntfy_from_isp *iocb)
4074 struct qla_tgt_srr_imm *imm;
4075 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4076 struct qla_tgt_srr_ctio *sctio;
4080 ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
4083 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
4085 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
4087 /* IRQ is already OFF */
4088 spin_lock(&tgt->srr_lock);
4089 imm->srr_id = tgt->imm_srr_id;
4090 list_add_tail(&imm->srr_list_entry,
4091 &tgt->srr_imm_list);
4092 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
4093 "IMM NTFY SRR %p added (id %d, ui %x)\n",
4094 imm, imm->srr_id, iocb->u.isp24.srr_ui);
4095 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
4097 list_for_each_entry(sctio, &tgt->srr_ctio_list,
4099 if (sctio->srr_id == imm->srr_id) {
4105 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
4106 "Scheduling srr work\n");
4107 schedule_work(&tgt->srr_work);
4109 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
4110 "qla_target(%d): imm_srr_id "
4111 "== ctio_srr_id (%d), but there is no "
4112 "corresponding SRR CTIO, deleting IMM "
4113 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
4115 list_del(&imm->srr_list_entry);
4119 spin_unlock(&tgt->srr_lock);
4123 spin_unlock(&tgt->srr_lock);
4125 struct qla_tgt_srr_ctio *ts;
4127 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
4128 "qla_target(%d): Unable to allocate SRR IMM "
4129 "entry, SRR request will be rejected\n", vha->vp_idx);
4131 /* IRQ is already OFF */
4132 spin_lock(&tgt->srr_lock);
4133 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
4135 if (sctio->srr_id == tgt->imm_srr_id) {
4136 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
4137 "CTIO SRR %p deleted (id %d)\n",
4138 sctio, sctio->srr_id);
4139 list_del(&sctio->srr_list_entry);
4140 qlt_send_term_exchange(vha, sctio->cmd,
4141 &sctio->cmd->atio, 1);
4145 spin_unlock(&tgt->srr_lock);
4152 qlt_send_notify_ack(vha, iocb, 0, 0, 0,
4153 NOTIFY_ACK_SRR_FLAGS_REJECT,
4154 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
4155 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
4159 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
4161 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
4162 struct imm_ntfy_from_isp *iocb)
4164 struct qla_hw_data *ha = vha->hw;
4165 uint32_t add_flags = 0;
4166 int send_notify_ack = 1;
4169 status = le16_to_cpu(iocb->u.isp2x.status);
4171 case IMM_NTFY_LIP_RESET:
4173 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
4174 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
4175 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
4176 iocb->u.isp24.status_subcode);
4178 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4179 send_notify_ack = 0;
4183 case IMM_NTFY_LIP_LINK_REINIT:
4185 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4186 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
4187 "qla_target(%d): LINK REINIT (loop %#x, "
4188 "subcode %x)\n", vha->vp_idx,
4189 le16_to_cpu(iocb->u.isp24.nport_handle),
4190 iocb->u.isp24.status_subcode);
4191 if (tgt->link_reinit_iocb_pending) {
4192 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
4195 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
4196 tgt->link_reinit_iocb_pending = 1;
4198 * QLogic requires waiting after LINK REINIT for possible
4199 * PDISC or ADISC ELS commands
4201 send_notify_ack = 0;
4205 case IMM_NTFY_PORT_LOGOUT:
4206 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
4207 "qla_target(%d): Port logout (loop "
4208 "%#x, subcode %x)\n", vha->vp_idx,
4209 le16_to_cpu(iocb->u.isp24.nport_handle),
4210 iocb->u.isp24.status_subcode);
4212 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
4213 send_notify_ack = 0;
4214 /* The sessions will be cleared in the callback, if needed */
4217 case IMM_NTFY_GLBL_TPRLO:
4218 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
4219 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
4220 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4221 send_notify_ack = 0;
4222 /* The sessions will be cleared in the callback, if needed */
4225 case IMM_NTFY_PORT_CONFIG:
4226 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
4227 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
4229 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
4230 send_notify_ack = 0;
4231 /* The sessions will be cleared in the callback, if needed */
4234 case IMM_NTFY_GLBL_LOGO:
4235 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
4236 "qla_target(%d): Link failure detected\n",
4238 /* I_T nexus loss */
4239 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
4240 send_notify_ack = 0;
4243 case IMM_NTFY_IOCB_OVERFLOW:
4244 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
4245 "qla_target(%d): Cannot provide requested "
4246 "capability (IOCB overflowed the immediate notify "
4247 "resource count)\n", vha->vp_idx);
4250 case IMM_NTFY_ABORT_TASK:
4251 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
4252 "qla_target(%d): Abort Task (S %08x I %#x -> "
4253 "L %#x)\n", vha->vp_idx,
4254 le16_to_cpu(iocb->u.isp2x.seq_id),
4255 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
4256 le16_to_cpu(iocb->u.isp2x.lun));
4257 if (qlt_abort_task(vha, iocb) == 0)
4258 send_notify_ack = 0;
4261 case IMM_NTFY_RESOURCE:
4262 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
4263 "qla_target(%d): Out of resources, host %ld\n",
4264 vha->vp_idx, vha->host_no);
4267 case IMM_NTFY_MSG_RX:
4268 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
4269 "qla_target(%d): Immediate notify task %x\n",
4270 vha->vp_idx, iocb->u.isp2x.task_flags);
4271 if (qlt_handle_task_mgmt(vha, iocb) == 0)
4272 send_notify_ack = 0;
4276 if (qlt_24xx_handle_els(vha, iocb) == 0)
4277 send_notify_ack = 0;
4281 qlt_prepare_srr_imm(vha, iocb);
4282 send_notify_ack = 0;
4286 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
4287 "qla_target(%d): Received unknown immediate "
4288 "notify status %x\n", vha->vp_idx, status);
4292 if (send_notify_ack)
4293 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
4297 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire
4298 * This function sends busy to ISP 2xxx or 24xx.
4300 static int __qlt_send_busy(struct scsi_qla_host *vha,
4301 struct atio_from_isp *atio, uint16_t status)
4303 struct ctio7_to_24xx *ctio24;
4304 struct qla_hw_data *ha = vha->hw;
4306 struct qla_tgt_sess *sess = NULL;
4308 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4309 atio->u.isp24.fcp_hdr.s_id);
4311 qlt_send_term_exchange(vha, NULL, atio, 1);
4314 /* Sending a marker isn't necessary, since we are called from the ISR */
4316 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
4318 ql_dbg(ql_dbg_io, vha, 0x3063,
4319 "qla_target(%d): %s failed: unable to allocate "
4320 "request packet", vha->vp_idx, __func__);
4324 pkt->entry_count = 1;
4325 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
4327 ctio24 = (struct ctio7_to_24xx *)pkt;
4328 ctio24->entry_type = CTIO_TYPE7;
4329 ctio24->nport_handle = sess->loop_id;
4330 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
4331 ctio24->vp_index = vha->vp_idx;
4332 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
4333 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
4334 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
4335 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
4336 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
4337 __constant_cpu_to_le16(
4338 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
4339 CTIO7_FLAGS_DONT_RET_CTIO);
4341 * A CTIO from the fw w/o an se_cmd doesn't provide enough info to retry
4342 * it, if explicit confirmation is used.
4344 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
4345 ctio24->u.status1.scsi_status = cpu_to_le16(status);
4346 /* Memory Barrier */
4348 qla2x00_start_iocbs(vha, vha->req);
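
/*
 * Note: a -ENOMEM return from __qlt_send_busy() (no room on the request
 * ring) is what pushes callers onto the deferred q_full_list path handled
 * by qlt_alloc_qfull_cmd()/qlt_free_qfull_cmds() below.
 */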
/*
 * This routine is used to allocate a command for either a QFull condition
 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
 * out previously.
 */
static void
qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull)
{
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess;
	struct se_session *se_sess;
	struct qla_tgt_cmd *cmd;
	int tag;

	if (unlikely(tgt->tgt_stop)) {
		ql_dbg(ql_dbg_io, vha, 0x300a,
			"New command while device %p is shutting down\n", tgt);
		return;
	}

	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		ql_dbg(ql_dbg_io, vha, 0x3068,
			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
			vha->vp_idx, __func__,
			vha->hw->tgt.num_qfull_cmds_dropped);

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	sess = ha->tgt.tgt_ops->find_sess_by_s_id
		(vha, atio->u.isp24.fcp_hdr.s_id);
	if (!sess)
		return;

	se_sess = sess->se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
	if (tag < 0)
		return;

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	if (!cmd) {
		ql_dbg(ql_dbg_io, vha, 0x3009,
			"qla_target(%d): %s: Allocation of cmd failed\n",
			vha->vp_idx, __func__);

		vha->hw->tgt.num_qfull_cmds_dropped++;
		if (vha->hw->tgt.num_qfull_cmds_dropped >
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
				vha->hw->tgt.num_qfull_cmds_dropped;

		qlt_chk_exch_leak_thresh_hold(vha);
		return;
	}

	memset(cmd, 0, sizeof(struct qla_tgt_cmd));

	qlt_incr_num_pend_cmds(vha);
	INIT_LIST_HEAD(&cmd->cmd_list);
	memcpy(&cmd->atio, atio, sizeof(*atio));

	cmd->tgt = vha->vha_tgt.qla_tgt;
	cmd->vha = vha;
	cmd->reset_count = vha->hw->chip_reset;

	if (qfull) {
		cmd->q_full = 1;
		/* NOTE: borrowing the state field to carry the status */
		cmd->state = status;
	} else
		cmd->term_exchg = 1;

	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);

	vha->hw->tgt.num_qfull_cmds_alloc++;
	if (vha->hw->tgt.num_qfull_cmds_alloc >
		vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
		vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
			vha->hw->tgt.num_qfull_cmds_alloc;
}
void
qlt_free_qfull_cmds(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_tgt_cmd *cmd, *tcmd;
	struct list_head free_list;
	int rc = 0;

	if (list_empty(&ha->tgt.q_full_list))
		return;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);

	if (list_empty(&ha->tgt.q_full_list)) {
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
		return;
	}

	list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
		if (cmd->q_full)
			/* cmd->state is a borrowed field to hold status */
			rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
		else if (cmd->term_exchg)
			rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);

		if (rc == -ENOMEM)
			break;

		if (cmd->q_full)
			ql_dbg(ql_dbg_io, vha, 0x3006,
			    "%s: busy sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else if (cmd->term_exchg)
			ql_dbg(ql_dbg_io, vha, 0x3007,
			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
		else
			ql_dbg(ql_dbg_io, vha, 0x3008,
			    "%s: Unexpected cmd in QFull list %p\n", __func__,
			    cmd);

		list_del(&cmd->cmd_list);
		list_add_tail(&cmd->cmd_list, &free_list);

		/* piggy back on hardware_lock for protection */
		vha->hw->tgt.num_qfull_cmds_alloc--;
	}

	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	cmd = NULL;

	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
		list_del(&cmd->cmd_list);
		/*
		 * This cmd was never sent to TCM. There is no need
		 * to schedule free or call free_cmd
		 */
		qlt_free_cmd(cmd);
	}
}
static void
qlt_send_busy(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status)
{
	int rc = 0;

	rc = __qlt_send_busy(vha, atio, status);
	if (rc == -ENOMEM)
		qlt_alloc_qfull_cmd(vha, atio, status, 1);
}

static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t status;

	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
		return 0;

	status = temp_sam_status;
	qlt_send_busy(vha, atio, status);
	return 1;
}
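
/*
 * Flow summary for the QFull handling above: once the number of pending
 * commands crosses Q_FULL_THRESH_HOLD(), new ATIOs are answered with the
 * (temporary) SAM status in temp_sam_status instead of being queued to the
 * target core; if even that busy response cannot be sent for lack of
 * request ring space, the ATIO is parked on q_full_list and retried later
 * by qlt_free_qfull_cmds().
 */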
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
	struct atio_from_isp *atio)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int rc;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_io, vha, 0x3064,
		    "ATIO pkt, but no tgt (ha %p)", ha);
		return;
	}
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
		if (unlikely(atio->u.isp24.exchange_addr ==
		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
			ql_dbg(ql_dbg_io, vha, 0x3065,
			    "qla_target(%d): ATIO_TYPE7 "
			    "received with UNKNOWN exchange address, "
			    "sending QUEUE_FULL\n", vha->vp_idx);
			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
			break;
		}

		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
			rc = qlt_chk_qfull_thresh_hold(vha, atio);
			if (rc != 0) {
				tgt->irq_cmd_count--;
				return;
			}
			rc = qlt_handle_cmd_for_atio(vha, atio);
		} else {
			rc = qlt_handle_task_mgmt(vha, atio);
		}
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe059,
					    "qla_target: Unable to send "
					    "command to target for req, "
					    "ignoring.\n");
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe05a,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status.\n", vha->vp_idx);
					qlt_send_busy(vha, atio, SAM_STAT_BUSY);
				}
			}
		}
		break;

	case IMMED_NOTIFY_TYPE:
	{
		if (unlikely(atio->u.isp2x.entry_status != 0)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05b,
			    "qla_target(%d): Received ATIO packet %x "
			    "with error status %x\n", vha->vp_idx,
			    atio->u.raw.entry_type,
			    atio->u.isp2x.entry_status);
			break;
		}
		ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
		break;
	}

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe05c,
		    "qla_target(%d): Received unknown ATIO atio "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
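
/*
 * Note on irq_cmd_count: the tgt-stop path (see qlt_stop_phase2() earlier
 * in this file) waits for this counter to drain before tearing the target
 * down, which is why the handlers above and below are careful to decrement
 * it on every exit path.
 */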
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe05d,
		    "qla_target(%d): Response pkt %x received, but no "
		    "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
		return;
	}

	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (pkt->entry_type) {
	case CTIO_CRC2:
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case ACCEPT_TGT_IO_TYPE:
	{
		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
		int rc;

		if (atio->u.isp2x.status !=
		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
			    "qla_target(%d): ATIO with error "
			    "status %x received\n", vha->vp_idx,
			    le16_to_cpu(atio->u.isp2x.status));
			break;
		}

		rc = qlt_chk_qfull_thresh_hold(vha, atio);
		if (rc != 0) {
			tgt->irq_cmd_count--;
			return;
		}

		rc = qlt_handle_cmd_for_atio(vha, atio);
		if (unlikely(rc != 0)) {
			if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
				qlt_send_busy(vha, atio, 0);
#else
				qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
			} else {
				if (tgt->tgt_stop) {
					ql_dbg(ql_dbg_tgt, vha, 0xe05f,
					    "qla_target: Unable to send "
					    "command to target, sending TERM "
					    "EXCHANGE for rsp\n");
					qlt_send_term_exchange(vha, NULL,
					    atio, 1);
				} else {
					ql_dbg(ql_dbg_tgt, vha, 0xe060,
					    "qla_target(%d): Unable to send "
					    "command to target, sending BUSY "
					    "status\n", vha->vp_idx);
					qlt_send_busy(vha, atio, 0);
				}
			}
		}
		break;
	}

	case CONTINUE_TGT_IO_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case CTIO_A64_TYPE:
	{
		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
		qlt_do_ctio_completion(vha, entry->handle,
		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
		    entry);
		break;
	}

	case IMMED_NOTIFY_TYPE:
		ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
		qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
		break;

	case NOTIFY_ACK_TYPE:
		if (tgt->notify_ack_expected > 0) {
			struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe036,
			    "NOTIFY_ACK seq %08x status %x\n",
			    le16_to_cpu(entry->u.isp2x.seq_id),
			    le16_to_cpu(entry->u.isp2x.status));
			tgt->notify_ack_expected--;
			if (entry->u.isp2x.status !=
			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe061,
				    "qla_target(%d): NOTIFY_ACK "
				    "failed %x\n", vha->vp_idx,
				    le16_to_cpu(entry->u.isp2x.status));
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe062,
			    "qla_target(%d): Unexpected NOTIFY_ACK received\n",
			    vha->vp_idx);
		}
		break;

	case ABTS_RECV_24XX:
		ql_dbg(ql_dbg_tgt, vha, 0xe037,
		    "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
		qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
		break;

	case ABTS_RESP_24XX:
		if (tgt->abts_resp_expected > 0) {
			struct abts_resp_from_24xx_fw *entry =
				(struct abts_resp_from_24xx_fw *)pkt;
			ql_dbg(ql_dbg_tgt, vha, 0xe038,
			    "ABTS_RESP_24XX: compl_status %x\n",
			    entry->compl_status);
			tgt->abts_resp_expected--;
			if (le16_to_cpu(entry->compl_status) !=
			    ABTS_RESP_COMPL_SUCCESS) {
				if ((entry->error_subcode1 == 0x1E) &&
				    (entry->error_subcode2 == 0)) {
					/*
					 * We've got a race here: the aborted
					 * exchange was not terminated, i.e.
					 * the response for the aborted command
					 * was sent between the time the abort
					 * request was received and processed.
					 * Unfortunately, the firmware has a
					 * silly requirement that all aborted
					 * exchanges must be explicitly
					 * terminated, otherwise it refuses to
					 * send responses for the abort
					 * requests. So, we have to
					 * (re)terminate the exchange and retry
					 * the abort response.
					 */
					qlt_24xx_retry_term_exchange(vha,
					    entry);
				} else
					ql_dbg(ql_dbg_tgt, vha, 0xe063,
					    "qla_target(%d): ABTS_RESP_24XX "
					    "failed %x (subcode %x:%x)",
					    vha->vp_idx, entry->compl_status,
					    entry->error_subcode1,
					    entry->error_subcode2);
			}
		} else {
			ql_dbg(ql_dbg_tgt, vha, 0xe064,
			    "qla_target(%d): Unexpected ABTS_RESP_24XX "
			    "received\n", vha->vp_idx);
		}
		break;

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe065,
		    "qla_target(%d): Received unknown response pkt "
		    "type %x\n", vha->vp_idx, pkt->entry_type);
		break;
	}

	tgt->irq_cmd_count--;
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire.
 */
void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
	uint16_t *mailbox)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	int login_code;

	if (!ha->tgt.tgt_ops)
		return;

	if (unlikely(tgt == NULL)) {
		ql_dbg(ql_dbg_tgt, vha, 0xe03a,
		    "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
		return;
	}

	if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
	    IS_QLA2100(ha))
		return;
	/*
	 * In tgt_stop mode we also should allow all requests to pass.
	 * Otherwise, some commands can get stuck.
	 */

	tgt->irq_cmd_count++;

	switch (code) {
	case MBA_RESET:			/* Reset */
	case MBA_SYSTEM_ERR:		/* System Error */
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
		    "qla_target(%d): System error async event %#x "
		    "occurred", vha->vp_idx, code);
		break;
	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LOOP_UP:
	{
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
		    "qla_target(%d): Async LOOP_UP occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		if (tgt->link_reinit_iocb_pending) {
			qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
			    0, 0, 0, 0, 0, 0);
			tgt->link_reinit_iocb_pending = 0;
		}
		break;
	}

	case MBA_LIP_OCCURRED:
	case MBA_LOOP_DOWN:
	case MBA_LIP_RESET:
	case MBA_RSCN_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
		    "qla_target(%d): Async event %#x occurred "
		    "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
		break;

	case MBA_PORT_UPDATE:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
		    "qla_target(%d): Port update async event %#x "
		    "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
		    "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
		    le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));

		login_code = le16_to_cpu(mailbox[2]);
		if (login_code == 0x4)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
			    "Async MB 2: Got PLOGI Complete\n");
		else if (login_code == 0x7)
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
			    "Async MB 2: Port Logged Out\n");
		break;

	default:
		break;
	}

	tgt->irq_cmd_count--;
}
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
	uint16_t loop_id)
{
	fc_port_t *fcport;
	int rc;

	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
	if (!fcport) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
		    "qla_target(%d): Allocation of tmp FC port failed",
		    vha->vp_idx);
		return NULL;
	}

	fcport->loop_id = loop_id;

	rc = qla2x00_get_port_database(vha, fcport, 0);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
		    "qla_target(%d): Failed to retrieve fcport "
		    "information -- get_port_database() returned %x "
		    "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
		kfree(fcport);
		return NULL;
	}

	return fcport;
}
/* Must be called under tgt_mutex */
static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
	uint8_t *s_id)
{
	struct qla_tgt_sess *sess = NULL;
	fc_port_t *fcport = NULL;
	int rc, global_resets;
	uint16_t loop_id = 0;

retry:
	global_resets =
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
	if (rc != 0) {
		if ((s_id[0] == 0xFF) &&
		    (s_id[1] == 0xFC)) {
			/*
			 * This is Domain Controller, so it should be
			 * OK to drop SCSI commands from it.
			 */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
			    "Unable to find initiator with S_ID %x:%x:%x",
			    s_id[0], s_id[1], s_id[2]);
		} else
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
			    "qla_target(%d): Unable to find "
			    "initiator with S_ID %x:%x:%x",
			    vha->vp_idx, s_id[0], s_id[1],
			    s_id[2]);
		return NULL;
	}

	fcport = qlt_get_port_database(vha, loop_id);
	if (!fcport)
		return NULL;

	if (global_resets !=
	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
		    "qla_target(%d): global reset during session discovery "
		    "(counter was %d, new %d), retrying", vha->vp_idx,
		    global_resets,
		    atomic_read(&vha->vha_tgt.
			qla_tgt->tgt_global_resets_count));
		/* Discard the stale port database before retrying,
		 * so the fcport allocation is not leaked. */
		kfree(fcport);
		goto retry;
	}

	sess = qlt_create_sess(vha, fcport, true);

	kfree(fcport);
	return sess;
}
static void qlt_abort_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint32_t be_s_id;
	uint8_t s_id[3];
	int rc;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
	s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
	s_id[2] = prm->abts.fcp_hdr_le.s_id[0];

	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
	    (unsigned char *)&be_s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	if (tgt->tgt_stop)
		goto out_term;

	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
	struct qla_tgt_sess_work_param *prm)
{
	struct atio_from_isp *a = &prm->tm_iocb2;
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_sess *sess = NULL;
	unsigned long flags;
	uint8_t *s_id = NULL; /* to hide compiler warnings */
	int rc;
	uint32_t lun, unpacked_lun;
	int lun_size, fn;
	void *iocb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (tgt->tgt_stop)
		goto out_term;

	s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_make_local_sess(vha, s_id);
		/* sess has got an extra creation ref */
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (!sess)
			goto out_term;
	} else {
		kref_get(&sess->se_sess->sess_kref);
	}

	iocb = a;
	lun = a->u.isp24.fcp_cmnd.lun;
	lun_size = sizeof(lun);
	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
	if (rc != 0)
		goto out_term;

	ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return;

out_term:
	qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
	if (sess)
		ha->tgt.tgt_ops->put_sess(sess);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
{
	struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
	struct scsi_qla_host *vha = tgt->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	while (!list_empty(&tgt->sess_works_list)) {
		struct qla_tgt_sess_work_param *prm = list_entry(
		    tgt->sess_works_list.next, typeof(*prm),
		    sess_works_list_entry);

		/*
		 * This work can be scheduled on several CPUs at a time, so we
		 * must delete the entry to eliminate double processing
		 */
		list_del(&prm->sess_works_list_entry);

		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

		switch (prm->type) {
		case QLA_TGT_SESS_WORK_ABORT:
			qlt_abort_work(tgt, prm);
			break;
		case QLA_TGT_SESS_WORK_TM:
			qlt_tmr_work(tgt, prm);
			break;
		default:
			BUG_ON(1);
			break;
		}

		spin_lock_irqsave(&tgt->sess_work_lock, flags);

		kfree(prm);
	}
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
}
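
/*
 * The qla_tgt_sess_work_param entries consumed above are queued from the
 * hardware interrupt path, e.g. when an ABTS or a task management request
 * arrives for a session that does not exist yet; qlt_sched_sess_work()
 * (earlier in this file) allocates the prm, links it on sess_works_list
 * and schedules this work.
 */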
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
		base_vha->host->hostt->supported_mode |= MODE_TARGET;

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	INIT_LIST_HEAD(&tgt->sess_list);
	INIT_LIST_HEAD(&tgt->del_sess_list);
	INIT_DELAYED_WORK(&tgt->sess_del_work,
		(void (*)(struct work_struct *))qlt_del_sess_work_fn);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_ctio_list);
	INIT_LIST_HEAD(&tgt->srr_imm_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
		"qla_target(%d): using 64 Bit PCI addressing",
		base_vha->vp_idx);
	tgt->tgt_enable_64bit_addr = 1;
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;

	if (base_vha->fc_vport)
		return 0;

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	return 0;
}
/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	int i;

	pr_debug("qla2xxx HW vha->node_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->node_name[i]);
	pr_debug("\n");
	pr_debug("qla2xxx HW vha->port_name: ");
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", vha->port_name[i]);
	pr_debug("\n");

	pr_debug("qla2xxx passed configfs WWPN: ");
	put_unaligned_be64(wwpn, b);
	for (i = 0; i < WWN_SIZE; i++)
		pr_debug("%02x ", b[i]);
	pr_debug("\n");
}
/**
 * qla_tgt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: passed FC target WWPN
 * @npiv_wwpn: NPIV WWPN, 0 when registering a physical lport
 * @npiv_wwnn: NPIV WWNN, 0 when registering a physical lport
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
				 host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);
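
/*
 * Typical caller, a sketch of how the tcm_qla2xxx module uses this
 * interface when a configfs lport is created (the exact error label is
 * the caller's, not part of this API):
 *
 *	ret = qlt_lport_register(lport, wwpn, 0, 0,
 *	    tcm_qla2xxx_lport_register_cb);
 *	if (ret != 0)
 *		goto out;
 *
 * The callback runs with the matching scsi_qla_host resolved by the WWPN
 * comparison above, and the scsi_host reference is kept on success.
 */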
/**
 * qla_tgt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode |= MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}
/* Must be called under HW lock */
void qlt_clear_mode(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	switch (ql2x_ini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode &= ~MODE_TARGET;
		break;
	default:
		break;
	}

	if (ha->tgt.ini_mode_force_reverse)
		qla_reverse_ini_mode(vha);
}
/*
 * qla_tgt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
}
EXPORT_SYMBOL(qlt_enable_vha);
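
/*
 * Usage note (an assumption about the external caller, not enforced here):
 * tcm_qla2xxx invokes qlt_enable_vha() when a tpg's configfs "enable"
 * attribute is set. For an NPIV port (vp_idx != 0) only the vport is
 * bounced; for the physical port the whole ISP is reset so the firmware
 * comes back up with target mode enabled.
 */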
/*
 * qla_tgt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
void
qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_hba_online(vha);
}
/*
 * Called from qla_init.c:qla24xx_vport_create() context to setup
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	if (!qla_tgt_mode_enabled(vha))
		return;

	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}
void
qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
{
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		if (qla_ini_mode_enabled(vha))
			ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
		else
			ct_req->req.rff_id.fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		ct_req->req.rff_id.fc4_feature = BIT_1;
	}
}
/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (!qla_tgt_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = ATIO_PROCESSED;
		pkt++;
	}
}
/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!vha->flags.online)
		return;

	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = ATIO_PROCESSED;
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
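
/*
 * The ring is consumed in entry_count-sized bursts: one ATIO may span
 * several ring entries (continuation entries), so the inner loop above
 * advances atio_ring_index once per entry while the whole packet is handed
 * to qlt_24xx_atio_pkt_all_vps() only once. Writing ATIO_PROCESSED back
 * into each consumed entry is what lets the outer while loop detect fresh
 * firmware entries on the next interrupt.
 */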
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	if (!QLA_TGT_MODE_ENABLED())
		return;

	WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
	RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));

	if (IS_ATIO_MSIX_CAPABLE(ha)) {
		struct qla_msix_entry *msix = &ha->msix_entries[2];
		struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

		icb->msix_atio = cpu_to_le16(msix->entry);
		ql_dbg(ql_dbg_init, vha, 0xf072,
		    "Registering ICB vector 0x%x for atio queue.\n",
		    msix->entry);
	}
}
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);

		/* Enable target mode */
		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (!qla_ini_mode_enabled(vha))
			nv->firmware_options_1 |=
			    __constant_cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* out-of-order frames reassembly */
	nv->firmware_options_3 |= BIT_6|BIT_9;

	if (ha->tgt.enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
	}
}
void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
	}
}
void
qlt_83xx_iospace_config(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	ha->msix_count += 1; /* For ATIO Q */
}
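
/*
 * The extra vector reserved here is assumed to be wired up by the qla2xxx
 * interrupt setup code (the MSI-X entry tables in qla_isr.c) as the
 * dedicated ATIO queue vector, serviced by qla83xx_msix_atio_q() below.
 */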
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
	struct sts_entry_24xx *pkt)
{
	switch (pkt->entry_type) {
	case ABTS_RECV_24XX:
	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case NOTIFY_ACK_TYPE:
	case CTIO_CRC2:
		return 1;
	default:
		return 0;
	}
}
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;
	/* Disable ini mode, if requested */
	if (!qla_ini_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
	qlt_clear_mode(base_vha);
}
irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qlt_24xx_process_atio_queue(vha);
	qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
	    MAX_MULTI_ID_FABRIC, GFP_KERNEL);
	if (!ha->tgt.tgt_vp_map)
		return -ENOMEM;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring) {
		kfree(ha->tgt.tgt_vp_map);
		return -ENOMEM;
	}
	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	kfree(ha->tgt.tgt_vp_map);
}
/* vport_slock to be held by the caller */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	switch (cmd) {
	case SET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
		break;
	case RESET_VP_IDX:
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
		break;
	}
}
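
/*
 * tgt_vp_map is consulted on the hot ATIO path: the vp_index carried in
 * each incoming packet is translated back to the owning scsi_qla_host
 * (via qlt_find_host_by_vp_idx()) so qlt_24xx_atio_pkt_all_vps() can route
 * the packet to the right NPIV host.
 */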
static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else
		return false;

	return true;
}
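
/*
 * Example of the documented module parameter usage (a usage sketch, not
 * code from this file):
 *
 *	modprobe qla2xxx qlini_mode="disabled"
 *
 * makes qlt_parse_ini_mode() select QLA2XXX_INI_MODE_DISABLED, which in
 * turn makes qlt_init() below return 1.
 */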
int __init qlt_init(void)
{
	int ret;

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
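
/*
 * The positive return from qlt_init() is consumed by
 * qla2x00_module_init() in qla_os.c, which uses it to set
 * disable_target_scan on the FC transport templates so that
 * fc_scsi_scan_rport() does not run scsi_scan_target() during LOOP UP
 * events while initiator mode is disabled.
 */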
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}