/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"

#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN * 2)
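/*
 * Rough sizing sketch: each QP may have up to ISERT_QP_MAX_RECV_DTOS
 * receives and ISERT_QP_MAX_REQ_DTOS sends outstanding, so a CQ shared
 * by up to ISERT_MAX_CONN QPs must absorb the sum of both. The trailing
 * ISERT_MAX_CONN * 2 term adds slack, presumably for the login exchange
 * and drain/flush completions that fall outside the DTO budgets.
 */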
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}

static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	isert_err("conn %p event: %d\n", isert_conn, e->event);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}

static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
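/*
 * isert_conn_setup_qp() - Create the RC QP for a new connection.
 *
 * Spreads QPs across the device's completion contexts by picking the
 * one with the fewest active QPs, then sizes the QP caps to match what
 * this driver actually posts (see isert_post_recv()/isert_post_send()).
 */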
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_qp_init_attr attr;
	struct isert_comp *comp;
	int ret, i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);
	mutex_unlock(&device_list_mutex);

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		goto err;
	}
	isert_conn->conn_qp = cma_id->qp;

	return 0;
err:
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);

	return ret;
}

static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	isert_dbg("event: %d\n", e->event);
}
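/*
 * isert_alloc_rx_descriptors() - Allocate and DMA-map the connection's
 * RX descriptor ring. Each descriptor carries one SGE covering its own
 * ISER_RX_PAYLOAD_SIZE buffer; on a mapping failure the descriptors
 * mapped so far are unwound before the ring is freed.
 */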
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->conn_rx_descs)
		goto fail;

	rx_desc = isert_conn->conn_rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->mr->lkey;
	}

	isert_conn->conn_rx_desc_head = 0;

	return 0;

dma_map_fail:
	rx_desc = isert_conn->conn_rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
fail:
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);

	return -ENOMEM;
}

static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->conn_rx_descs)
		return;

	rx_desc = isert_conn->conn_rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->conn_rx_descs);
	isert_conn->conn_rx_descs = NULL;
}
static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);
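/*
 * isert_create_device_ib_res() - Per-device one-time setup: query the
 * device attributes, pick fastreg vs. plain DMA MR registration
 * strategies, create one completion context per online CPU (capped by
 * ISERT_MAX_CQ and the device's completion vectors), and allocate the
 * PD and DMA MR.
 */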
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct ib_device_attr *dev_attr;
	int ret = 0, i;
	int max_cqe;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(ib_dev, dev_attr);
	if (ret)
		return ret;

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
					device->ib_device->num_comp_vectors));
	isert_info("Using %d CQs, %s supports %d vectors, "
		   "fast registration %d, pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->work, isert_cq_work);
		comp->cq = ib_create_cq(device->ib_device,
					isert_cq_callback,
					isert_cq_event_callback,
					(void *)comp,
					max_cqe, i);
		if (IS_ERR(comp->cq)) {
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->mr)) {
		ret = PTR_ERR(device->mr);
		isert_err("failed to create dma mr, device %p, ret=%d\n",
			  device, ret);
		goto out_mr;
	}

	return 0;

out_mr:
	ib_dealloc_pd(device->pd);
out_cq:
	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq) {
			cancel_work_sync(&comp->work);
			ib_destroy_cq(comp->cq);
		}
	}
	kfree(device->comps);

	return ret;
}

static void
isert_free_device_ib_res(struct isert_device *device)
{
	int i;

	isert_info("device %p\n", device);

	ib_dereg_mr(device->mr);
	ib_dealloc_pd(device->pd);
	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		cancel_work_sync(&comp->work);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}
	kfree(device->comps);
}

static void
isert_device_try_release(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
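/*
 * isert_device_find_by_ib_dev() - Find or create the isert_device for a
 * CM id, keyed by the IB device node GUID. device_list_mutex is held
 * across the lookup so the refcount and list membership stay coherent.
 */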
static struct isert_device *
isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}

static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->conn_fr_pool))
		return;

	isert_info("Freeing conn %p fastreg pool\n", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->conn_fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->conn_fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			   isert_conn->conn_fr_pool_size - i);
}
static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct ib_mr_init_attr mr_init_attr;
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
					    ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		isert_err("Failed to allocate prot frpl err=%ld\n",
			  PTR_ERR(pi_ctx->prot_frpl));
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto err_pi_ctx;
	}

	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_prot_frpl;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	memset(&mr_init_attr, 0, sizeof(mr_init_attr));
	mr_init_attr.max_reg_descriptors = 2;
	mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
err_prot_frpl:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
err_pi_ctx:
	kfree(desc->pi_ctx);

	return ret;
}

static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	int ret;

	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		isert_err("Failed to allocate data frpl err=%ld\n",
			  PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;

err_data_frpl:
	ib_free_fast_reg_page_list(fr_desc->data_frpl);

	return ret;
}
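/*
 * isert_conn_create_fastreg_pool() - Pre-allocate fast registration
 * descriptors for the connection. The pool is sized from the session
 * queue depth, doubled plus ISCSIT_EXTRA_TAGS, so a descriptor should
 * be available whenever a command tag is.
 */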
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->conn_device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->conn_fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   device->pd, fr_desc);
		if (ret) {
			isert_err("Failed to create fastreg descriptor err=%d\n",
				  ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		isert_conn->conn_fr_pool_size++;
	}

	isert_dbg("Creating conn %p fastreg pool size=%d\n",
		  isert_conn, isert_conn->conn_fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}
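/*
 * isert_connect_request() - RDMA CM CONNECT_REQUEST handler: allocates
 * the isert_conn, maps the login request/response buffers, binds the
 * connection to an isert_device and QP, posts the initial login RECV,
 * and accepts the CM connection. Errors unwind in reverse order and
 * reject the CM id.
 */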
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		  cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		isert_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->conn_wait);
	kref_init(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);
	spin_lock_init(&isert_conn->conn_lock);
	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);

	isert_conn->conn_cm_id = cma_id;

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		  isert_conn->login_buf, isert_conn->login_req_buf,
		  isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
			  ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
			  ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	isert_conn->conn_device = device;

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	isert_info("np %p: Allow accept_np to continue\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;

	isert_dbg("conn %p\n", isert_conn);

	if (device && device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->conn_cm_id)
		rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->conn_qp) {
		struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;

		isert_dbg("dec completion context %p active_qps\n", comp);
		mutex_lock(&device_list_mutex);
		comp->active_qps--;
		mutex_unlock(&device_list_mutex);

		ib_destroy_qp(isert_conn->conn_qp);
	}

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);
}

static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_info("conn %p\n", isert_conn);

	if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
		isert_warn("conn %p connect_release is running\n", isert_conn);
		return;
	}

	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->conn_mutex);
}

static void
isert_release_conn_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, conn_kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}

/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with conn_mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->conn_cm_id);
		if (err)
			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
}
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}

static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	isert_info("conn %p completing conn_wait\n", isert_conn);
	complete(&isert_conn->conn_wait);

	return 0;
}

static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_conn->conn_cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}

static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	isert_info("event %d status %d id %p np %p\n", event->event,
		   event->status, cma_id, cma_id->context);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
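/*
 * isert_post_recv() - Repost @count RX descriptors as a chained list so
 * one ib_post_recv() call covers the whole batch. The ring head wraps
 * with a mask, which relies on ISERT_QP_MAX_RECV_DTOS being a power of
 * two.
 */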
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->conn_rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->conn_rx_descs[rx_head];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
			   &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		isert_dbg("Posted %d RX buffers\n", count);
		isert_conn->conn_rx_desc_head = rx_head;
	}
	return ret;
}

static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	= NULL;
	send_wr.wr_id	= (uintptr_t)tx_desc;
	send_wr.sg_list	= tx_desc->tx_sg;
	send_wr.num_sge	= tx_desc->num_sge;
	send_wr.opcode	= IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}

static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}

static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->mr->lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}

static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->conn_device->mr->lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		  sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	return ret;
}

static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr	= isert_conn->login_rsp_dma;
		tx_dsg->length	= length;
		tx_dsg->lkey	= isert_conn->conn_device->mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->conn_device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					isert_err("Conn: %p failed to create"
						  " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->conn_mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->conn_mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
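/*
 * isert_rx_login_req() - Copy a received login PDU into the iscsi_login
 * context. On the first request this also seeds the negotiation state
 * from the PDU header and completes conn_login_comp so the login thread
 * can proceed; later requests are handed to the login workqueue.
 */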
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min	= login_req->min_version;
		login->version_max	= login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
		login->init_task_tag	= login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid		= be16_to_cpu(login_req->cid);
		login->tsih		= be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->conn_login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}
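/*
 * isert_handle_scsi_cmd() - Set up an incoming SCSI command PDU and, if
 * it carries immediate data, copy the payload from the RX descriptor
 * into the se_cmd scatterlist before sequencing the command.
 */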
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		  sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}

static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len, cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}

static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}

static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			isert_err("Unable to allocate text_in of payload_length: %u\n",
				  payload_length);
			return -ENOMEM;
		}
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_session *sess = conn->sess;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (sess->sess_ops->SessionType &&
	   (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
			if (!cmd)
				break;
		} else {
			cmd = isert_allocate_cmd(conn);
			if (!cmd)
				break;
		}

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}

static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
		    u32 xfer_len)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			  rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			  rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		  hdr->opcode, hdr->itt, hdr->flags,
		  (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->conn_mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->conn_mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	isert_dbg("Decremented post_recv_buf_count: %d\n",
		  isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				     ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			isert_err("isert_post_recv() count: %d failed, %d\n",
				  count, err);
		}
	}
}

static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
					  ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
					PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}

static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->send_wr) {
		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}

static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	LIST_HEAD(unmap_list);

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->fr_desc) {
		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->conn_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_bh(&isert_conn->conn_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}

static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->conn_device;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				  cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}

static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}

static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
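/*
 * isert_check_pi_status() - After a protected RDMA completes, query the
 * signature MR and translate any T10-PI guard/reftag/apptag failure into
 * the matching TCM sense code plus the offending sector.
 */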
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}

static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}

static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->conn_device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		target_put_sess_cmd(se_cmd->se_sess, se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}

static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}

static void
isert_send_completion(struct iser_tx_desc *tx_desc,
		      struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);

	switch (wr->iser_ib_op) {
	case ISER_IB_SEND:
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}

/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
	void *start = isert_conn->conn_rx_descs;
	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}
static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
	if (wc->wr_id == ISER_BEACON_WRID) {
		isert_info("conn %p completing conn_wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->conn_wait_comp_err);
	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct isert_cmd *isert_cmd;
		struct iser_tx_desc *desc;

		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
		isert_cmd = desc->isert_cmd;
		if (!isert_cmd)
			isert_unmap_tx_desc(desc, ib_dev);
		else
			isert_completion_put(desc, isert_cmd, ib_dev, true);
	} else {
		isert_conn->post_recv_buf_count--;
		if (!isert_conn->post_recv_buf_count)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}
}

static void
isert_handle_wc(struct ib_wc *wc)
{
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	isert_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
		} else {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			isert_send_completion(tx_desc, isert_conn);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			isert_err("wr id %llx status %d vend_err %x\n",
				  wc->wr_id, wc->status, wc->vendor_err);
		else
			isert_dbg("flush error: wr id %llx\n", wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
	}
}
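/*
 * isert_cq_work() - Bottom half for CQ events: polls completions in
 * ARRAY_SIZE(comp->wcs)-sized batches until the CQ drains or the poll
 * budget is spent, then re-arms the CQ. The budget keeps one busy QP
 * from monopolizing the workqueue.
 */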
static void
isert_cq_work(struct work_struct *work)
{
	enum { isert_poll_budget = 65536 };
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			isert_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= isert_poll_budget)
			break;
	}

	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_callback(struct ib_cq *cq, void *context)
{
	struct isert_comp *comp = context;

	queue_work(isert_comp_wq, &comp->work);
}

static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}
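/*
 * isert_put_response() - Build and post a SCSI response PDU, attaching
 * any sense data as a second SGE on the send work request.
 */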
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->conn_device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= device->mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}

static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}
2153 static enum target_prot_op
2154 isert_get_sup_prot_ops(struct iscsi_conn *conn)
2156 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2157 struct isert_device *device = isert_conn->conn_device;
2159 if (conn->tpg->tpg_attrib.t10_pi) {
2160 if (device->pi_capable) {
2161 isert_info("conn %p PI offload enabled\n", isert_conn);
2162 isert_conn->pi_support = true;
2163 return TARGET_PROT_ALL;
2167 isert_info("conn %p PI offload disabled\n", isert_conn);
2168 isert_conn->pi_support = false;
2170 return TARGET_PROT_NORMAL;
2174 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2175 bool nopout_response)
2177 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2178 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2179 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2181 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2182 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2183 &isert_cmd->tx_desc.iscsi_header,
2185 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2186 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2188 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
2190 return isert_post_response(isert_conn, isert_cmd);
2194 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2196 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2197 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2198 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2200 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2201 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2202 &isert_cmd->tx_desc.iscsi_header);
2203 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2204 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2206 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
2208 return isert_post_response(isert_conn, isert_cmd);
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->conn_device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
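
/*
 * Fill one RDMA work request with ib_sge entries mapping the command's
 * TCM scatterlist starting at @offset, consuming at most max_sge
 * entries and at most @data_left bytes. Returns the number of sges
 * used so the caller can advance its ib_sge array for the next work
 * request in the chain.
 */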
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->mr->lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
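
/*
 * isert_map_rdma() - non-fastreg RDMA setup using the local DMA lkey.
 *
 * DMA-maps the command's data scatterlist and builds a chain of RDMA
 * READ/WRITE work requests pointing directly at the mapped pages. For
 * RDMA_WRITE the last work request is chained to the SCSI response
 * send; for RDMA_READ the last one is marked IB_SEND_SIGNALED so its
 * completion can kick off target-core processing.
 */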
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		isert_dbg("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;

unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}
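
/*
 * Translate a DMA-mapped scatterlist into the page array consumed by a
 * fast registration work request. Contiguous chunks are walked in
 * PAGE_SIZE steps; a chunk ends at any entry that does not finish on a
 * page boundary (except the last one). Returns the number of pages
 * written to @fr_pl.
 */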
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
			  i, (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
				  n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}
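
/*
 * Build a LOCAL_INV work request for @mr and advance its rkey, so the
 * next fast registration of this MR uses a fresh key.
 */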
static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
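
/*
 * Register a data or protection buffer with a fast registration MR and
 * return a single ib_sge describing it. A single-entry SGL is handed
 * back directly under the global MR lkey without any registration. If
 * the descriptor's key is still valid from a previous use, a LOCAL_INV
 * work request is chained in front of the FASTREG one.
 */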
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->conn_device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;

	if (mem->dma_nents == 1) {
		sge->lkey = device->mr->lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID) {
		/* Registering data buffer */
		mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}

	page_off = mem->offset % PAGE_SIZE;

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
					     &frpl->page_list[0]);

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}
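
/*
 * Describe one side (wire or memory) of the T10-DIF signature domain:
 * CRC guard tag, a PI interval of one logical block, and the reference
 * tag seeded from the command. TYPE1/TYPE2 formats increment the
 * reference tag per block, hence ref_remap.
 */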
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}
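
/*
 * Convert target-core DIF check flags into the verbs check_mask byte:
 * the high bits cover the guard tag, the middle bits the application
 * tag and the low bits the reference tag. Note the app-tag bits are
 * also keyed off TARGET_DIF_CHECK_REFTAG, apparently because the
 * target core does not request an app-tag check on its own.
 */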
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}
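
/*
 * Post a signature MR registration (IB_WR_REG_SIG_MR) that combines
 * the already registered data sge, and optionally the protection sge,
 * into a single signature-enabled region described by ib_sg[SIG].
 * When guards travel on the wire, the SIG sge is enlarged by
 * prot_length to cover the interleaved PI.
 */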
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}
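
/*
 * Prepare protection information offload for one command: lazily
 * allocate the descriptor's pi_ctx, map and fast-register the
 * protection scatterlist (when the command carries one), then bind
 * data and protection together with isert_reg_sig_mr().
 */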
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->conn_device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}
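
/*
 * isert_reg_rdma() - fastreg RDMA setup, used when the payload spans
 * multiple DMA entries or the command needs PI offload. Unlike
 * isert_map_rdma(), a single work request suffices because the payload
 * is collapsed into one registered sge. The fastreg descriptor is
 * pulled from the connection's pool and returned to it on error.
 */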
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->conn_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
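
/*
 * Queue DATA-IN: post the RDMA_WRITE chain that pushes the payload to
 * the initiator. For non-PI commands the SCSI response PDU is chained
 * behind the RDMA_WRITE so a single doorbell covers both; PI commands
 * defer the response until the signature status has been verified.
 */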
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}
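
/*
 * Queue DATA-OUT retrieval: post RDMA_READ work requests that pull
 * WRITE payload from the initiator into the command's scatterlist.
 */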
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct isert_device *device = isert_conn->conn_device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret = 0;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
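
/*
 * Create, bind and listen on an RDMA CM ID for the network portal.
 * Returns the listening CM ID or an ERR_PTR on failure.
 */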
static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a nop.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before conn_login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn;
	int max_accept = 0, ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	if (ret || max_accept > 5)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		max_accept++;
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, conn_accept_node);
	list_del_init(&isert_conn->conn_accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;
	max_accept = 0;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that at this point we don't have hanging connections that
	 * completed RDMA establishment but didn't start iscsi login
	 * process. So work-around this by cleaning up whatever piled
	 * up in np_accept_list.
	 */
	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_np->np_accept_list)) {
		isert_info("Still have isert connections, cleaning up...\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->np_accept_list,
					 conn_accept_node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->np_accept_mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	wait_for_completion(&isert_conn->conn_wait);

	mutex_lock(&isert_conn->conn_mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->conn_mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}
static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}
static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->conn_wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->conn_wait_comp_err);
}
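
/*
 * isert_wait_conn() - orderly connection teardown. Terminate the CM
 * connection, then wait in turn for outstanding se_cmd descriptors,
 * for all flush errors to drain past the beacon, and for a posted
 * logout response, before handing final release off to
 * isert_release_wq.
 */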
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->conn_mutex);
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->conn_mutex);

	isert_wait4cmds(conn);
	isert_wait4flush(isert_conn);
	isert_wait4logout(isert_conn);

	INIT_WORK(&isert_conn->release_work, isert_release_work);
	queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_put_conn(isert_conn);
}
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);