/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)

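/*
 * Worst-case CQ sizing: each of up to ISERT_MAX_CONN connections may have
 * ISERT_QP_MAX_RECV_DTOS receive and ISERT_QP_MAX_REQ_DTOS send work
 * requests outstanding, so a completion queue shared by that many QPs must
 * absorb the sum of both, plus one extra entry of slack per connection.
 */
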
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);

static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);

static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);

static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);

static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);

static int
isert_rdma_accept(struct isert_conn *isert_conn);

struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);

static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}

static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;
	int i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

	return comp;
}

static void
isert_comp_put(struct isert_comp *comp)
{
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);
}

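/*
 * Note on the SGE limits below: max_send_sge is taken straight from the
 * device's advertised maximum, while the per-connection max_sge used when
 * building RDMA work requests is clamped to min(max_sge, max_sge_rd),
 * since max_sge_rd bounds the scatter/gather list length of RDMA READs.
 */
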
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	attr.cap.max_send_sge = device->dev_attr.max_sge;
	isert_conn->max_sge = min(device->dev_attr.max_sge,
				  device->dev_attr.max_sge_rd);
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}

static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_comp *comp;
	int ret;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto err;
	}

	return 0;
err:
	isert_comp_put(comp);
	return ret;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	isert_dbg("event: %d\n", e->event);
}

static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->rx_descs)
		goto fail;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;
	}

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
fail:
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
	return -ENOMEM;
}

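/*
 * The descriptor array above is allocated and DMA-mapped once per
 * connection; individual descriptors are then reposted to the receive
 * queue for the connection's lifetime and only unmapped again in the
 * teardown path below.
 */
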
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}

static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);

static void
isert_free_comps(struct isert_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq) {
			cancel_work_sync(&comp->work);
			ib_destroy_cq(comp->cq);
		}
	}
	kfree(device->comps);
}

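/*
 * Completion contexts are spread across the device's completion vectors:
 * one CQ (with its own deferred-work item) per vector, capped by
 * ISERT_MAX_CQ and the number of online CPUs. isert_comp_get() above then
 * places each new QP on the least-loaded context.
 */
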
static int
isert_alloc_comps(struct isert_device *device,
		  struct ib_device_attr *attr)
{
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors, "
		   "Fast registration %d, pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct ib_cq_init_attr cq_attr = {};
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->work, isert_cq_work);
		cq_attr.cqe = max_cqe;
		cq_attr.comp_vector = i;
		comp->cq = ib_create_cq(device->ib_device,
					isert_cq_callback,
					isert_cq_event_callback,
					(void *)comp,
					&cq_attr);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;
out_cq:
	isert_free_comps(device);
	return ret;
}

static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device_attr *dev_attr;
	int ret;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(device->ib_device, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	ret = isert_alloc_comps(device, dev_attr);
	if (ret)
		return ret;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	return 0;

out_cq:
	isert_free_comps(device);
	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
	isert_free_comps(device);
}

static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}

static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->fr_pool))
		return;

	isert_info("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->fr_pool, list) {
		list_del(&fr_desc->list);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			isert_conn->fr_pool_size - i);
}

static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				      ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_pi_ctx;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(pi_ctx->prot_mr);
err_pi_ctx:
	kfree(pi_ctx);

	return ret;
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				       ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		return PTR_ERR(fr_desc->data_mr);
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;
}

static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   device->pd, fr_desc);
		if (ret) {
			isert_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		isert_conn->fr_pool_size++;
	}

	isert_dbg("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}

static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	spin_lock_init(&isert_conn->pool_lock);
	INIT_LIST_HEAD(&isert_conn->fr_pool);
	INIT_WORK(&isert_conn->release_work, isert_release_work);
}

static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_buf);
}

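/*
 * Login buffer layout, carved out of a single allocation:
 *
 *   login_buf
 *   +------------------------------+----------------------+
 *   | login_req_buf                | login_rsp_buf        |
 *   | ISCSI_DEF_MAX_RECV_SEG_LEN   | ISER_RX_LOGIN_SIZE   |
 *   | mapped DMA_FROM_DEVICE       | mapped DMA_TO_DEVICE |
 *   +------------------------------+----------------------+
 */
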
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		return -ENOMEM;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;

	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		  isert_conn->login_buf, isert_conn->login_req_buf,
		  isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	return 0;

out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
	return ret;
}

static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	return 0;

out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}

static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	if (device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	if (isert_conn->login_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	kfree(isert_conn);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);
	up(&isert_np->sem);
}

static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is BOUND, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
	case ISER_CONN_BOUND:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->cm_id);
		if (err)
			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
}

static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	bool terminating = false;

	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_info("conn %p completing wait\n", isert_conn);
	complete(&isert_conn->wait);

	if (terminating)
		goto out;

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	}
	mutex_unlock(&isert_np->mutex);

out:
	return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	list_del_init(&isert_conn->node);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}

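/*
 * isert_post_recvm() below batches the receive posting: the rx_wr array is
 * chained via each entry's next pointer and NULL-terminated, so the whole
 * batch is handed to the HCA with a single ib_post_recv() call.
 */
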
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[i];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
	}
	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
			   &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	}

	return ret;
}

static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_recv_wr *rx_wr_failed, rx_wr;
	int ret;

	rx_wr.wr_id = (uintptr_t)rx_desc;
	rx_wr.sg_list = &rx_desc->rx_sg;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (uintptr_t)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
			ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}

static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					isert_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}

static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;
	isert_cmd->rx_desc = rx_desc;

	return cmd;
}

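/*
 * Immediate data handling in isert_handle_scsi_cmd() below: when the first
 * burst carries only part of the data, the payload is copied into the
 * command's pre-allocated scatterlist; when it carries all of it, the rx
 * descriptor's payload is used in place as a single-entry scatterlist
 * (flagged to the core via SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC), avoiding
 * both a backing allocation and a copy.
 */
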
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	if (imm_data_len != data_len) {
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
	} else {
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);
	}

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len, cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
	if (rc) {
		isert_err("ib_post_recv failed with %d\n", rc);
		return rc;
	}

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			isert_err("Unable to allocate text_in of payload_length: %u\n",
				  payload_length);
			return -ENOMEM;
		}
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}

static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
		else
			cmd = isert_allocate_cmd(conn, rx_desc);

		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	isert_rx_opcode(isert_conn, rx_desc,
			read_stag, read_va, write_stag, write_va);
}

static void
isert_rcv_completion(struct iser_rx_desc *desc,
		     struct isert_conn *isert_conn,
		     u32 xfer_len)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
}

static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			  DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
					  ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
					PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}

static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}

static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->rdma_wr) {
		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
		kfree(wr->rdma_wr);
		wr->rdma_wr = NULL;
	}

	if (wr->ib_sge) {
		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}

static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->fr_desc) {
		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->pool_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_bh(&isert_conn->pool_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	wr->ib_sge = NULL;
	wr->rdma_wr = NULL;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->device;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}

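/*
 * In isert_check_pi_status() below, the failing sector is derived from the
 * signature error offset: the divisor is block_size + 8 because each
 * protected block carries an 8-byte T10 PI tuple on the wire (e.g. 520
 * bytes per 512-byte sector).
 */
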
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->rdma_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->rdma_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		target_put_sess_cmd(se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}

static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}

static void
isert_snd_completion(struct iser_tx_desc *tx_desc,
		     struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);

	switch (wr->iser_ib_op) {
	case ISER_IB_SEND:
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
	void *start = isert_conn->rx_descs;
	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);

	if ((wr_id >= start && wr_id < start + len) ||
	    (wr_id == isert_conn->login_req_buf))
		return false;

	return true;
}

static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
	if (wc->wr_id == ISER_BEACON_WRID) {
		isert_info("conn %p completing wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->wait_comp_err);
	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
		struct ib_device *ib_dev = isert_conn->cm_id->device;
		struct isert_cmd *isert_cmd;
		struct iser_tx_desc *desc;

		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
		isert_cmd = desc->isert_cmd;
		if (!isert_cmd)
			isert_unmap_tx_desc(desc, ib_dev);
		else
			isert_completion_put(desc, isert_cmd, ib_dev, true);
	} else {
		isert_conn->post_recv_buf_count--;
		if (!isert_conn->post_recv_buf_count &&
		    isert_conn->state >= ISER_CONN_BOUND)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}
}

static void
isert_handle_wc(struct ib_wc *wc)
{
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	isert_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
		} else {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			isert_snd_completion(tx_desc, isert_conn);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			isert_err("%s (%d): wr id %llx vend_err %x\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id, wc->vendor_err);
		else
			isert_dbg("%s (%d): wr id %llx\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
	}
}

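/*
 * CQ processing runs from a workqueue. The poll budget below bounds how
 * many completions one work invocation will reap before bailing out, so a
 * busy CQ cannot monopolize the worker; the CQ is re-armed with
 * ib_req_notify_cq() once polling stops.
 */
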
static void
isert_cq_work(struct work_struct *work)
{
	enum { isert_poll_budget = 65536 };
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			isert_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= isert_poll_budget)
			break;
	}

	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_callback(struct ib_cq *cq, void *context)
{
	struct isert_comp *comp = context;

	queue_work(isert_comp_wq, &comp->work);
}

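/*
 * Responses repost a receive buffer before sending, mirroring the data-out
 * path above: the initiator may transmit its next PDU as soon as it sees
 * the response, so a receive credit must already be in the queue.
 */
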
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
	if (ret) {
		isert_err("ib_post_recv failed with %d\n", ret);
		return ret;
	}

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}

static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}

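/*
 * T10-PI (protection information) offload is advertised only when both the
 * portal group requests it (tpg_attrib.t10_pi) and the device exposes
 * signature-enabled memory registration; otherwise the connection falls
 * back to TARGET_PROT_NORMAL.
 */
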
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}

static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

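/*
 * Translate a slice of the command's TCM scatterlist into the ib_sge
 * array of a single RDMA work request. The slice starts @offset bytes
 * into the payload and is bounded both by data_left and by the
 * connection's max_sge limit; the caller posts one WR per slice.
 */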
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	rdma_wr->wr.sg_list = ib_sge;
	rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->pd->local_dma_lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		if (!data_left)
			break;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	rdma_wr->wr.num_sge = ++i;
	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);

	return rdma_wr->wr.num_sge;
}

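/*
 * Non-FRWR mapping path: the payload is mapped with the local DMA lkey
 * and split into a chain of RDMA work requests, each covering at most
 * max_sge scatterlist entries. For RDMA_WRITE the last WR is chained
 * to the iSCSI response send WR so one ib_post_send() pushes data and
 * status together; for RDMA_READ the last WR is simply signaled.
 */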
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_rdma_wr *rdma_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) * wr->rdma_wr_num,
			      GFP_KERNEL);
	if (!wr->rdma_wr) {
		isert_dbg("Unable to allocate wr->rdma_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->rdma_wr_num; i++) {
		rdma_wr = &isert_cmd->rdma_wr.rdma_wr[i];
		data_len = min(data_left, rdma_write_max);

		rdma_wr->wr.send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
			rdma_wr->remote_addr = isert_cmd->read_va + offset;
			rdma_wr->rkey = isert_cmd->read_stag;
			if (i + 1 == wr->rdma_wr_num)
				rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
			else
				rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
		} else {
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
			rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
			rdma_wr->rkey = isert_cmd->write_stag;
			if (i + 1 == wr->rdma_wr_num)
				rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
			else
				rdma_wr->wr.next = &wr->rdma_wr[i + 1].wr;
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 rdma_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;

unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}

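/*
 * Build a local-invalidate WR for @mr and bump its rkey so the next
 * IB_WR_REG_MR registration uses a fresh key. Chaining the invalidate
 * in front of the registration keeps a stale rkey from matching any
 * in-flight remote access.
 */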
static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_reg_wr reg_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	int ret, n;

	if (mem->dma_nents == 1) {
		sge->lkey = device->pd->local_dma_lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID)
		/* Registering data buffer */
		mr = fr_desc->data_mr;
	else
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
	if (unlikely(n != mem->nents)) {
		isert_err("failed to map mr sg (%d/%d)\n",
			  n, mem->nents);
		return n < 0 ? n : -EINVAL;
	}

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	reg_wr.wr.next = NULL;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
	reg_wr.wr.send_flags = 0;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = mr;
	reg_wr.key = mr->lkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &reg_wr.wr;
	else
		wr->next = &reg_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = mr->iova;
	sge->length = mr->length;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}

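/*
 * Fill in one T10-DIF signature domain (wire or memory) from the
 * se_cmd protection attributes: CRC block guard, a protection interval
 * equal to the block size, and the LBA-seeded reference tag, with
 * ref-tag remapping for DIF TYPE1/TYPE2.
 */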
static void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}

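/*
 * Translate target-core DIF check flags into the check_mask consumed
 * via ib_sig_attrs. The target core exposes no separate apptag check,
 * which is presumably why TARGET_DIF_CHECK_REFTAG drives both of the
 * two lower mask fields here.
 */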
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}

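/*
 * Register a signature MR over the already registered data (and
 * optional protection) buffers with IB_WR_REG_SIG_MR, so the HCA can
 * generate, strip or verify T10-PI metadata in-line with the RDMA
 * transfer. The resulting ib_sg[SIG] entry spans the data bytes, plus
 * the protection bytes whenever guards travel on the wire.
 */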
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_sig_handover_wr sig_wr;
	struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.wr.num_sge = 1;
	sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.sig_attrs = &sig_attrs;
	sig_wr.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr.wr;
	else
		wr->next = &sig_wr.wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}

static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}

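/*
 * FRWR registration path: a transfer that maps to a single SGE and
 * needs no PI can use the local DMA lkey directly; anything else takes
 * a fast_reg_descriptor from the connection pool and fast-registers
 * the data buffer (plus the signature MR for PI commands) before a
 * single RDMA work request is built.
 */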
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_rdma_wr *rdma_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		fr_desc = list_first_entry(&isert_conn->fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->rdma_wr_num = 1;
	memset(&wr->s_rdma_wr, 0, sizeof(wr->s_rdma_wr));
	wr->rdma_wr = &wr->s_rdma_wr;
	wr->isert_cmd = isert_cmd;

	rdma_wr = &isert_cmd->rdma_wr.s_rdma_wr;
	rdma_wr->wr.sg_list = &wr->s_ib_sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->wr.wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		rdma_wr->remote_addr = isert_cmd->read_va;
		rdma_wr->rkey = isert_cmd->read_stag;
		rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
					 0 : IB_SEND_SIGNALED;
	} else {
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = isert_cmd->write_va;
		rdma_wr->rkey = isert_cmd->write_stag;
		rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}

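/*
 * Queue Data-IN: post the RDMA_WRITE chain that pushes READ payload to
 * the initiator. For non-PI commands the SCSI response PDU rides on
 * the tail of the same chain; for PI commands the response is sent
 * later, from the completion path, once the signature MR status has
 * been checked.
 */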
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 * it to the tail of the RDMA_WRITE chain.
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
		wr->rdma_wr_num += 1;

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}
	}

	rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}

static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->qp, &wr->rdma_wr->wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}

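/*
 * iscsit_transport queue callbacks. The immediate queue only handles
 * command removal and NOPINs that expect a response; every other
 * response type (logout, task management, reject, text, status) is
 * dispatched from isert_response_queue() below.
 */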
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from
	 * the iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}

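/*
 * Block the iSCSI login thread on isert_np->sem until a connect
 * request has been queued on the pending list, then bind the first
 * pending isert_conn to the incoming iscsi_conn. Bails out with
 * -ENODEV once the np thread is resetting or shutting down.
 */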
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail.
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	isert_conn->state = ISER_CONN_BOUND;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that we don't have hanging connections that completed
	 * RDMA establishment but didn't start the iscsi login
	 * process, so work around this by cleaning up whatever
	 * piled up in the accepted and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	wait_for_completion(&isert_conn->wait);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}

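/*
 * Drain flush errors from the QP: post a "beacon" recv WR tagged with
 * ISER_BEACON_WRID and wait for its completion, at which point all
 * earlier flush errors are known to have been consumed.
 */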
static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon\n", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->wait_comp_err);
}

/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited dataout
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * dataout messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}

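/*
 * iscsit_wait_conn callback: terminate the connection, drain flush
 * errors, drop commands still waiting for unsolicited data-out, wait
 * for outstanding se_cmd references and any posted logout response,
 * then defer the final teardown to isert_release_wq.
 */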
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	/*
	 * Only wait for wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_wait4flush(isert_conn);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_wait4flush(isert_conn);
	isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);