1 /*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
4 * (c) Copyright 2013 Datera, Inc.
6 * Nicholas A. Bellinger <nab@linux-iscsi.org>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 ****************************************************************************/
19 #include <linux/string.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/socket.h>
24 #include <linux/in6.h>
25 #include <rdma/ib_verbs.h>
26 #include <rdma/rdma_cm.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/iscsi/iscsi_transport.h>
30 #include <linux/semaphore.h>
32 #include "isert_proto.h"
35 #define ISERT_MAX_CONN 8
36 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
37 #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
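/*
 * Worst case per device: each of up to ISERT_MAX_CONN connections can have
 * ISERT_QP_MAX_RECV_DTOS receives and ISERT_QP_MAX_REQ_DTOS sends in
 * flight at once, so a shared CQ is sized for the sum of both plus headroom.
 */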
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)
41 static int isert_debug_level;
42 module_param_named(debug_level, isert_debug_level, int, 0644);
43 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
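/*
 * Example (assuming the module is loaded under its usual name, ib_isert):
 *   echo 1 > /sys/module/ib_isert/parameters/debug_level
 */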
45 static DEFINE_MUTEX(device_list_mutex);
46 static LIST_HEAD(device_list);
47 static struct workqueue_struct *isert_comp_wq;
48 static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = (struct isert_conn *)context;

	isert_err("conn %p event: %d\n", isert_conn, e->event);
	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	}
}
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	memset(devattr, 0, sizeof(struct ib_device_attr));
	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
104 isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
105 isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
111 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
113 struct isert_device *device = isert_conn->conn_device;
114 struct ib_qp_init_attr attr;
115 struct isert_comp *comp;
118 mutex_lock(&device_list_mutex);
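	/*
	 * Spread QPs across completion contexts: pick the vector with the
	 * fewest active QPs, under device_list_mutex.
	 */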
119 for (i = 0; i < device->comps_used; i++)
120 if (device->comps[i].active_qps <
121 device->comps[min].active_qps)
123 comp = &device->comps[min];
125 isert_info("conn %p, using comp %p min_index: %d\n",
126 isert_conn, comp, min);
127 mutex_unlock(&device_list_mutex);
129 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
130 attr.event_handler = isert_qp_event_callback;
131 attr.qp_context = isert_conn;
132 attr.send_cq = comp->cq;
133 attr.recv_cq = comp->cq;
134 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
135 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
137 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
138 * work-around for RDMA_READs with ConnectX-2.
140 * Also, still make sure to have at least two SGEs for
141 * outgoing control PDU responses.
143 attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
144 isert_conn->max_sge = attr.cap.max_send_sge;
146 attr.cap.max_recv_sge = 1;
147 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
148 attr.qp_type = IB_QPT_RC;
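	/*
	 * A signature-enabled QP is required for T10-PI (DIF) offload;
	 * only request it when the device advertises the capability.
	 */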
149 if (device->pi_capable)
150 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
152 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
154 isert_err("rdma_create_qp failed for cma_id %d\n", ret);
157 isert_conn->conn_qp = cma_id->qp;
161 mutex_lock(&device_list_mutex);
163 mutex_unlock(&device_list_mutex);
169 isert_cq_event_callback(struct ib_event *e, void *context)
171 isert_dbg("event: %d\n", e->event);
175 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
177 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
178 struct iser_rx_desc *rx_desc;
179 struct ib_sge *rx_sg;
183 isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
184 sizeof(struct iser_rx_desc), GFP_KERNEL);
185 if (!isert_conn->conn_rx_descs)
188 rx_desc = isert_conn->conn_rx_descs;
190 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
191 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
192 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
193 if (ib_dma_mapping_error(ib_dev, dma_addr))
196 rx_desc->dma_addr = dma_addr;
198 rx_sg = &rx_desc->rx_sg;
199 rx_sg->addr = rx_desc->dma_addr;
200 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
201 rx_sg->lkey = isert_conn->conn_mr->lkey;
204 isert_conn->conn_rx_desc_head = 0;
209 rx_desc = isert_conn->conn_rx_descs;
210 for (j = 0; j < i; j++, rx_desc++) {
211 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
212 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
214 kfree(isert_conn->conn_rx_descs);
215 isert_conn->conn_rx_descs = NULL;
217 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
223 isert_free_rx_descriptors(struct isert_conn *isert_conn)
225 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
226 struct iser_rx_desc *rx_desc;
229 if (!isert_conn->conn_rx_descs)
232 rx_desc = isert_conn->conn_rx_descs;
233 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
234 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
235 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
238 kfree(isert_conn->conn_rx_descs);
239 isert_conn->conn_rx_descs = NULL;
242 static void isert_cq_work(struct work_struct *);
243 static void isert_cq_callback(struct ib_cq *, void *);
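/*
 * Per-device IB resources: query device attributes, choose fast
 * registration vs. plain DMA mapping for RDMA, and create one CQ (with
 * its polling work item) per completion vector actually used.
 */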
246 isert_create_device_ib_res(struct isert_device *device)
248 struct ib_device *ib_dev = device->ib_device;
249 struct ib_device_attr *dev_attr;
253 dev_attr = &device->dev_attr;
254 ret = isert_query_device(ib_dev, dev_attr);
258 max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
	/* assign function handlers */
261 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
262 dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
263 device->use_fastreg = 1;
264 device->reg_rdma_mem = isert_reg_rdma;
265 device->unreg_rdma_mem = isert_unreg_rdma;
267 device->use_fastreg = 0;
268 device->reg_rdma_mem = isert_map_rdma;
269 device->unreg_rdma_mem = isert_unmap_cmd;
272 /* Check signature cap */
273 device->pi_capable = dev_attr->device_cap_flags &
274 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
276 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
277 device->ib_device->num_comp_vectors));
278 isert_info("Using %d CQs, %s supports %d vectors support "
279 "Fast registration %d pi_capable %d\n",
280 device->comps_used, device->ib_device->name,
281 device->ib_device->num_comp_vectors, device->use_fastreg,
284 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
286 if (!device->comps) {
287 isert_err("Unable to allocate completion contexts\n");
291 for (i = 0; i < device->comps_used; i++) {
292 struct isert_comp *comp = &device->comps[i];
294 comp->device = device;
295 INIT_WORK(&comp->work, isert_cq_work);
296 comp->cq = ib_create_cq(device->ib_device,
298 isert_cq_event_callback,
301 if (IS_ERR(comp->cq)) {
302 ret = PTR_ERR(comp->cq);
307 ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
315 for (i = 0; i < device->comps_used; i++) {
316 struct isert_comp *comp = &device->comps[i];
319 cancel_work_sync(&comp->work);
320 ib_destroy_cq(comp->cq);
323 kfree(device->comps);
329 isert_free_device_ib_res(struct isert_device *device)
333 isert_info("device %p\n", device);
335 for (i = 0; i < device->comps_used; i++) {
336 struct isert_comp *comp = &device->comps[i];
338 cancel_work_sync(&comp->work);
339 ib_destroy_cq(comp->cq);
342 kfree(device->comps);
346 isert_device_try_release(struct isert_device *device)
348 mutex_lock(&device_list_mutex);
350 isert_info("device %p refcount %d\n", device, device->refcount);
351 if (!device->refcount) {
352 isert_free_device_ib_res(device);
353 list_del(&device->dev_node);
356 mutex_unlock(&device_list_mutex);
359 static struct isert_device *
360 isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
362 struct isert_device *device;
365 mutex_lock(&device_list_mutex);
366 list_for_each_entry(device, &device_list, dev_node) {
367 if (device->ib_device->node_guid == cma_id->device->node_guid) {
369 isert_info("Found iser device %p refcount %d\n",
370 device, device->refcount);
371 mutex_unlock(&device_list_mutex);
376 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
378 mutex_unlock(&device_list_mutex);
379 return ERR_PTR(-ENOMEM);
382 INIT_LIST_HEAD(&device->dev_node);
384 device->ib_device = cma_id->device;
385 ret = isert_create_device_ib_res(device);
388 mutex_unlock(&device_list_mutex);
393 list_add_tail(&device->dev_node, &device_list);
394 isert_info("Created a new iser device %p refcount %d\n",
395 device, device->refcount);
396 mutex_unlock(&device_list_mutex);
402 isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
404 struct fast_reg_descriptor *fr_desc, *tmp;
407 if (list_empty(&isert_conn->conn_fr_pool))
410 isert_info("Freeing conn %p fastreg pool", isert_conn);
412 list_for_each_entry_safe(fr_desc, tmp,
413 &isert_conn->conn_fr_pool, list) {
414 list_del(&fr_desc->list);
415 ib_free_fast_reg_page_list(fr_desc->data_frpl);
416 ib_dereg_mr(fr_desc->data_mr);
417 if (fr_desc->pi_ctx) {
418 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
419 ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
420 ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
421 kfree(fr_desc->pi_ctx);
427 if (i < isert_conn->conn_fr_pool_size)
428 isert_warn("Pool still has %d regions registered\n",
429 isert_conn->conn_fr_pool_size - i);
433 isert_create_pi_ctx(struct fast_reg_descriptor *desc,
434 struct ib_device *device,
437 struct ib_mr_init_attr mr_init_attr;
438 struct pi_context *pi_ctx;
441 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
443 isert_err("Failed to allocate pi context\n");
447 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
448 ISCSI_ISER_SG_TABLESIZE);
449 if (IS_ERR(pi_ctx->prot_frpl)) {
450 isert_err("Failed to allocate prot frpl err=%ld\n",
451 PTR_ERR(pi_ctx->prot_frpl));
452 ret = PTR_ERR(pi_ctx->prot_frpl);
456 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
457 if (IS_ERR(pi_ctx->prot_mr)) {
458 isert_err("Failed to allocate prot frmr err=%ld\n",
459 PTR_ERR(pi_ctx->prot_mr));
460 ret = PTR_ERR(pi_ctx->prot_mr);
463 desc->ind |= ISERT_PROT_KEY_VALID;
465 memset(&mr_init_attr, 0, sizeof(mr_init_attr));
466 mr_init_attr.max_reg_descriptors = 2;
467 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
468 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
469 if (IS_ERR(pi_ctx->sig_mr)) {
470 isert_err("Failed to allocate signature enabled mr err=%ld\n",
471 PTR_ERR(pi_ctx->sig_mr));
472 ret = PTR_ERR(pi_ctx->sig_mr);
476 desc->pi_ctx = pi_ctx;
477 desc->ind |= ISERT_SIG_KEY_VALID;
478 desc->ind &= ~ISERT_PROTECTED;
483 ib_dereg_mr(desc->pi_ctx->prot_mr);
485 ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
493 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
494 struct fast_reg_descriptor *fr_desc)
498 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
499 ISCSI_ISER_SG_TABLESIZE);
500 if (IS_ERR(fr_desc->data_frpl)) {
501 isert_err("Failed to allocate data frpl err=%ld\n",
502 PTR_ERR(fr_desc->data_frpl));
503 return PTR_ERR(fr_desc->data_frpl);
506 fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
507 if (IS_ERR(fr_desc->data_mr)) {
508 isert_err("Failed to allocate data frmr err=%ld\n",
509 PTR_ERR(fr_desc->data_mr));
510 ret = PTR_ERR(fr_desc->data_mr);
513 fr_desc->ind |= ISERT_DATA_KEY_VALID;
515 isert_dbg("Created fr_desc %p\n", fr_desc);
520 ib_free_fast_reg_page_list(fr_desc->data_frpl);
526 isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
528 struct fast_reg_descriptor *fr_desc;
529 struct isert_device *device = isert_conn->conn_device;
530 struct se_session *se_sess = isert_conn->conn->sess->se_sess;
531 struct se_node_acl *se_nacl = se_sess->se_node_acl;
534 * Setup the number of FRMRs based upon the number of tags
535 * available to session in iscsi_target_locate_portal().
537 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
538 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
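	/*
	 * Size the pool at twice the session tag count plus
	 * ISCSIT_EXTRA_TAGS, so a free descriptor remains available even
	 * with every tag in flight.
	 */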
540 isert_conn->conn_fr_pool_size = 0;
541 for (i = 0; i < tag_num; i++) {
542 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
544 isert_err("Failed to allocate fast_reg descriptor\n");
549 ret = isert_create_fr_desc(device->ib_device,
550 isert_conn->conn_pd, fr_desc);
552 isert_err("Failed to create fastreg descriptor err=%d\n",
558 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
559 isert_conn->conn_fr_pool_size++;
562 isert_dbg("Creating conn %p fastreg pool size=%d",
563 isert_conn, isert_conn->conn_fr_pool_size);
568 isert_conn_free_fastreg_pool(isert_conn);
573 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
575 struct isert_np *isert_np = cma_id->context;
576 struct iscsi_np *np = isert_np->np;
577 struct isert_conn *isert_conn;
578 struct isert_device *device;
579 struct ib_device *ib_dev = cma_id->device;
582 spin_lock_bh(&np->np_thread_lock);
584 spin_unlock_bh(&np->np_thread_lock);
585 isert_dbg("iscsi_np is not enabled, reject connect request\n");
586 return rdma_reject(cma_id, NULL, 0);
588 spin_unlock_bh(&np->np_thread_lock);
590 isert_dbg("cma_id: %p, portal: %p\n",
591 cma_id, cma_id->context);
593 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
595 isert_err("Unable to allocate isert_conn\n");
598 isert_conn->state = ISER_CONN_INIT;
599 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
600 init_completion(&isert_conn->conn_login_comp);
601 init_completion(&isert_conn->login_req_comp);
602 init_completion(&isert_conn->conn_wait);
603 kref_init(&isert_conn->conn_kref);
604 mutex_init(&isert_conn->conn_mutex);
605 spin_lock_init(&isert_conn->conn_lock);
606 INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
608 isert_conn->conn_cm_id = cma_id;
610 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
611 ISER_RX_LOGIN_SIZE, GFP_KERNEL);
612 if (!isert_conn->login_buf) {
613 isert_err("Unable to allocate isert_conn->login_buf\n");
618 isert_conn->login_req_buf = isert_conn->login_buf;
619 isert_conn->login_rsp_buf = isert_conn->login_buf +
620 ISCSI_DEF_MAX_RECV_SEG_LEN;
621 isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
622 isert_conn->login_buf, isert_conn->login_req_buf,
623 isert_conn->login_rsp_buf);
625 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
626 (void *)isert_conn->login_req_buf,
627 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
629 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
631 isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
633 isert_conn->login_req_dma = 0;
637 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
638 (void *)isert_conn->login_rsp_buf,
639 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
641 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
643 isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
645 isert_conn->login_rsp_dma = 0;
646 goto out_req_dma_map;
649 device = isert_device_find_by_ib_dev(cma_id);
650 if (IS_ERR(device)) {
651 ret = PTR_ERR(device);
652 goto out_rsp_dma_map;
655 /* Set max inflight RDMA READ requests */
656 isert_conn->initiator_depth = min_t(u8,
657 event->param.conn.initiator_depth,
658 device->dev_attr.max_qp_init_rd_atom);
659 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
661 isert_conn->conn_device = device;
662 isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
663 if (IS_ERR(isert_conn->conn_pd)) {
664 ret = PTR_ERR(isert_conn->conn_pd);
665 isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
670 isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
671 IB_ACCESS_LOCAL_WRITE);
672 if (IS_ERR(isert_conn->conn_mr)) {
673 ret = PTR_ERR(isert_conn->conn_mr);
674 isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
679 ret = isert_conn_setup_qp(isert_conn, cma_id);
683 ret = isert_rdma_post_recvl(isert_conn);
687 ret = isert_rdma_accept(isert_conn);
691 mutex_lock(&isert_np->np_accept_mutex);
692 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
693 mutex_unlock(&isert_np->np_accept_mutex);
695 isert_info("np %p: Allow accept_np to continue\n", np);
696 up(&isert_np->np_sem);
700 ib_dereg_mr(isert_conn->conn_mr);
702 ib_dealloc_pd(isert_conn->conn_pd);
704 isert_device_try_release(device);
706 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
707 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
709 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
710 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
712 kfree(isert_conn->login_buf);
715 rdma_reject(cma_id, NULL, 0);
720 isert_connect_release(struct isert_conn *isert_conn)
722 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
723 struct isert_device *device = isert_conn->conn_device;
725 isert_dbg("conn %p\n", isert_conn);
727 if (device && device->use_fastreg)
728 isert_conn_free_fastreg_pool(isert_conn);
730 isert_free_rx_descriptors(isert_conn);
731 rdma_destroy_id(isert_conn->conn_cm_id);
733 if (isert_conn->conn_qp) {
734 struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
736 isert_dbg("dec completion context %p active_qps\n", comp);
737 mutex_lock(&device_list_mutex);
739 mutex_unlock(&device_list_mutex);
741 ib_destroy_qp(isert_conn->conn_qp);
744 ib_dereg_mr(isert_conn->conn_mr);
745 ib_dealloc_pd(isert_conn->conn_pd);
747 if (isert_conn->login_buf) {
748 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
749 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
750 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
751 ISCSI_DEF_MAX_RECV_SEG_LEN,
753 kfree(isert_conn->login_buf);
758 isert_device_try_release(device);
762 isert_connected_handler(struct rdma_cm_id *cma_id)
764 struct isert_conn *isert_conn = cma_id->qp->qp_context;
766 isert_info("conn %p\n", isert_conn);
768 if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
769 isert_warn("conn %p connect_release is running\n", isert_conn);
773 mutex_lock(&isert_conn->conn_mutex);
774 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
775 isert_conn->state = ISER_CONN_UP;
776 mutex_unlock(&isert_conn->conn_mutex);
780 isert_release_conn_kref(struct kref *kref)
782 struct isert_conn *isert_conn = container_of(kref,
783 struct isert_conn, conn_kref);
785 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
788 isert_connect_release(isert_conn);
792 isert_put_conn(struct isert_conn *isert_conn)
794 kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
798 * isert_conn_terminate() - Initiate connection termination
799 * @isert_conn: isert connection struct
802 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
804 * In case the connection state is UP, complete flush as well.
806 * This routine must be called with conn_mutex held. Thus it is
807 * safe to call multiple times.
810 isert_conn_terminate(struct isert_conn *isert_conn)
814 switch (isert_conn->state) {
815 case ISER_CONN_TERMINATING:
818 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
819 isert_info("Terminating conn %p state %d\n",
820 isert_conn, isert_conn->state);
821 isert_conn->state = ISER_CONN_TERMINATING;
822 err = rdma_disconnect(isert_conn->conn_cm_id);
824 isert_warn("Failed rdma_disconnect isert_conn %p\n",
828 isert_warn("conn %p teminating in state %d\n",
829 isert_conn, isert_conn->state);
834 isert_np_cma_handler(struct isert_np *isert_np,
835 enum rdma_cm_event_type event)
837 isert_dbg("isert np %p, handling event %d\n", isert_np, event);
840 case RDMA_CM_EVENT_DEVICE_REMOVAL:
841 isert_np->np_cm_id = NULL;
843 case RDMA_CM_EVENT_ADDR_CHANGE:
844 isert_np->np_cm_id = isert_setup_id(isert_np);
845 if (IS_ERR(isert_np->np_cm_id)) {
846 isert_err("isert np %p setup id failed: %ld\n",
847 isert_np, PTR_ERR(isert_np->np_cm_id));
848 isert_np->np_cm_id = NULL;
852 isert_err("isert np %p Unexpected event %d\n",
860 isert_disconnected_handler(struct rdma_cm_id *cma_id,
861 enum rdma_cm_event_type event)
863 struct isert_np *isert_np = cma_id->context;
864 struct isert_conn *isert_conn;
866 if (isert_np->np_cm_id == cma_id)
867 return isert_np_cma_handler(cma_id->context, event);
869 isert_conn = cma_id->qp->qp_context;
871 mutex_lock(&isert_conn->conn_mutex);
872 isert_conn_terminate(isert_conn);
873 mutex_unlock(&isert_conn->conn_mutex);
875 isert_info("conn %p completing conn_wait\n", isert_conn);
876 complete(&isert_conn->conn_wait);
882 isert_connect_error(struct rdma_cm_id *cma_id)
884 struct isert_conn *isert_conn = cma_id->qp->qp_context;
886 isert_put_conn(isert_conn);
890 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
894 isert_info("event %d status %d id %p np %p\n", event->event,
895 event->status, cma_id, cma_id->context);
897 switch (event->event) {
898 case RDMA_CM_EVENT_CONNECT_REQUEST:
899 ret = isert_connect_request(cma_id, event);
901 isert_err("failed handle connect request %d\n", ret);
903 case RDMA_CM_EVENT_ESTABLISHED:
904 isert_connected_handler(cma_id);
906 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
907 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
908 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
909 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
910 ret = isert_disconnected_handler(cma_id, event->event);
912 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
913 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
914 case RDMA_CM_EVENT_CONNECT_ERROR:
915 isert_connect_error(cma_id);
918 isert_err("Unhandled RDMA CMA event: %d\n", event->event);
926 isert_post_recv(struct isert_conn *isert_conn, u32 count)
928 struct ib_recv_wr *rx_wr, *rx_wr_failed;
930 unsigned int rx_head = isert_conn->conn_rx_desc_head;
931 struct iser_rx_desc *rx_desc;
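	/*
	 * Note: the ring arithmetic below assumes ISERT_QP_MAX_RECV_DTOS
	 * is a power of two, since the head is advanced with a bitmask.
	 */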
933 for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
934 rx_desc = &isert_conn->conn_rx_descs[rx_head];
935 rx_wr->wr_id = (uintptr_t)rx_desc;
936 rx_wr->sg_list = &rx_desc->rx_sg;
938 rx_wr->next = rx_wr + 1;
939 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
943 rx_wr->next = NULL; /* mark end of work requests list */
945 isert_conn->post_recv_buf_count += count;
946 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
949 isert_err("ib_post_recv() failed with ret: %d\n", ret);
950 isert_conn->post_recv_buf_count -= count;
952 isert_dbg("Posted %d RX buffers\n", count);
953 isert_conn->conn_rx_desc_head = rx_head;
959 isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
961 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
962 struct ib_send_wr send_wr, *send_wr_failed;
965 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
966 ISER_HEADERS_LEN, DMA_TO_DEVICE);
969 send_wr.wr_id = (uintptr_t)tx_desc;
970 send_wr.sg_list = tx_desc->tx_sg;
971 send_wr.num_sge = tx_desc->num_sge;
972 send_wr.opcode = IB_WR_SEND;
973 send_wr.send_flags = IB_SEND_SIGNALED;
975 ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
977 isert_err("ib_post_send() failed, ret: %d\n", ret);
983 isert_create_send_desc(struct isert_conn *isert_conn,
984 struct isert_cmd *isert_cmd,
985 struct iser_tx_desc *tx_desc)
987 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
989 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
990 ISER_HEADERS_LEN, DMA_TO_DEVICE);
992 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
993 tx_desc->iser_header.flags = ISER_VER;
995 tx_desc->num_sge = 1;
996 tx_desc->isert_cmd = isert_cmd;
998 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
999 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
1000 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
1005 isert_init_tx_hdrs(struct isert_conn *isert_conn,
1006 struct iser_tx_desc *tx_desc)
1008 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1011 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
1012 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1013 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
1014 isert_err("ib_dma_mapping_error() failed\n");
1018 tx_desc->dma_addr = dma_addr;
1019 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
1020 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
1021 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
1023 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
1024 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
1025 tx_desc->tx_sg[0].lkey);
1031 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1032 struct ib_send_wr *send_wr)
1034 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
1036 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
1037 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
1038 send_wr->opcode = IB_WR_SEND;
1039 send_wr->sg_list = &tx_desc->tx_sg[0];
1040 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
1041 send_wr->send_flags = IB_SEND_SIGNALED;
1045 isert_rdma_post_recvl(struct isert_conn *isert_conn)
1047 struct ib_recv_wr rx_wr, *rx_wr_fail;
1051 memset(&sge, 0, sizeof(struct ib_sge));
1052 sge.addr = isert_conn->login_req_dma;
1053 sge.length = ISER_RX_LOGIN_SIZE;
1054 sge.lkey = isert_conn->conn_mr->lkey;
1056 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
1057 sge.addr, sge.length, sge.lkey);
1059 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
1060 rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
1061 rx_wr.sg_list = &sge;
1064 isert_conn->post_recv_buf_count++;
1065 ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
1067 isert_err("ib_post_recv() failed: %d\n", ret);
1068 isert_conn->post_recv_buf_count--;
1075 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1078 struct isert_conn *isert_conn = conn->context;
1079 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1080 struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
1083 isert_create_send_desc(isert_conn, NULL, tx_desc);
1085 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
1086 sizeof(struct iscsi_hdr));
1088 isert_init_tx_hdrs(isert_conn, tx_desc);
1091 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
1093 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
1094 length, DMA_TO_DEVICE);
1096 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
1098 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
1099 length, DMA_TO_DEVICE);
1101 tx_dsg->addr = isert_conn->login_rsp_dma;
1102 tx_dsg->length = length;
1103 tx_dsg->lkey = isert_conn->conn_mr->lkey;
1104 tx_desc->num_sge = 2;
1106 if (!login->login_failed) {
1107 if (login->login_complete) {
1108 if (!conn->sess->sess_ops->SessionType &&
1109 isert_conn->conn_device->use_fastreg) {
1110 ret = isert_conn_create_fastreg_pool(isert_conn);
1112 isert_err("Conn: %p failed to create"
1113 " fastreg pool\n", isert_conn);
1118 ret = isert_alloc_rx_descriptors(isert_conn);
1122 ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
1126 /* Now we are in FULL_FEATURE phase */
1127 mutex_lock(&isert_conn->conn_mutex);
1128 isert_conn->state = ISER_CONN_FULL_FEATURE;
1129 mutex_unlock(&isert_conn->conn_mutex);
1133 ret = isert_rdma_post_recvl(isert_conn);
1138 ret = isert_post_send(isert_conn, tx_desc);
1146 isert_rx_login_req(struct isert_conn *isert_conn)
1148 struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
1149 int rx_buflen = isert_conn->login_req_len;
1150 struct iscsi_conn *conn = isert_conn->conn;
1151 struct iscsi_login *login = conn->conn_login;
1154 isert_info("conn %p\n", isert_conn);
1156 WARN_ON_ONCE(!login);
1158 if (login->first_request) {
1159 struct iscsi_login_req *login_req =
1160 (struct iscsi_login_req *)&rx_desc->iscsi_header;
1162 * Setup the initial iscsi_login values from the leading
1163 * login request PDU.
1165 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1166 login->current_stage =
1167 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
1169 login->version_min = login_req->min_version;
1170 login->version_max = login_req->max_version;
1171 memcpy(login->isid, login_req->isid, 6);
1172 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1173 login->init_task_tag = login_req->itt;
1174 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1175 login->cid = be16_to_cpu(login_req->cid);
1176 login->tsih = be16_to_cpu(login_req->tsih);
1179 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1181 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1182 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1183 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1184 MAX_KEY_VALUE_PAIRS);
1185 memcpy(login->req_buf, &rx_desc->data[0], size);
1187 if (login->first_request) {
1188 complete(&isert_conn->conn_login_comp);
1191 schedule_delayed_work(&conn->login_work, 0);
1194 static struct iscsi_cmd
1195 *isert_allocate_cmd(struct iscsi_conn *conn)
1197 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1198 struct isert_cmd *isert_cmd;
1199 struct iscsi_cmd *cmd;
1201 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1203 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1206 isert_cmd = iscsit_priv_cmd(cmd);
1207 isert_cmd->conn = isert_conn;
1208 isert_cmd->iscsi_cmd = cmd;
1214 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1215 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1216 struct iser_rx_desc *rx_desc, unsigned char *buf)
1218 struct iscsi_conn *conn = isert_conn->conn;
1219 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1220 struct scatterlist *sg;
1221 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1222 bool dump_payload = false;
1224 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1228 imm_data = cmd->immediate_data;
1229 imm_data_len = cmd->first_burst_len;
1230 unsol_data = cmd->unsolicited_data;
1232 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1235 } else if (rc > 0) {
1236 dump_payload = true;
1243 sg = &cmd->se_cmd.t_data_sg[0];
1244 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1246 isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
1247 sg, sg_nents, &rx_desc->data[0], imm_data_len);
1249 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
1251 cmd->write_data_done += imm_data_len;
1253 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1254 spin_lock_bh(&cmd->istate_lock);
1255 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1256 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1257 spin_unlock_bh(&cmd->istate_lock);
1261 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
1263 if (!rc && dump_payload == false && unsol_data)
1264 iscsit_set_unsoliticed_dataout(cmd);
1265 else if (dump_payload && imm_data)
1266 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1272 isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1273 struct iser_rx_desc *rx_desc, unsigned char *buf)
1275 struct scatterlist *sg_start;
1276 struct iscsi_conn *conn = isert_conn->conn;
1277 struct iscsi_cmd *cmd = NULL;
1278 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1279 u32 unsol_data_len = ntoh24(hdr->dlength);
1280 int rc, sg_nents, sg_off, page_off;
1282 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
1288 * FIXME: Unexpected unsolicited_data out
1290 if (!cmd->unsolicited_data) {
1291 isert_err("Received unexpected solicited data payload\n");
1296 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1297 "write_data_done: %u, data_length: %u\n",
1298 unsol_data_len, cmd->write_data_done,
1299 cmd->se_cmd.data_length);
1301 sg_off = cmd->write_data_done / PAGE_SIZE;
1302 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1303 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1304 page_off = cmd->write_data_done % PAGE_SIZE;
1306 * FIXME: Non page-aligned unsolicited_data out
1309 isert_err("unexpected non-page aligned data payload\n");
1313 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1314 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1315 sg_nents, &rx_desc->data[0], unsol_data_len);
1317 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1320 rc = iscsit_check_dataout_payload(cmd, hdr, false);
1328 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1329 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1332 struct iscsi_conn *conn = isert_conn->conn;
1333 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1336 rc = iscsit_setup_nop_out(conn, cmd, hdr);
1340 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
1343 return iscsit_process_nop_out(conn, cmd, hdr);
1347 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1348 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1349 struct iscsi_text *hdr)
1351 struct iscsi_conn *conn = isert_conn->conn;
1352 u32 payload_length = ntoh24(hdr->dlength);
1354 unsigned char *text_in = NULL;
1356 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1360 if (payload_length) {
1361 text_in = kzalloc(payload_length, GFP_KERNEL);
1363 isert_err("Unable to allocate text_in of payload_length: %u\n",
1368 cmd->text_in_ptr = text_in;
1370 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1372 return iscsit_process_text_cmd(conn, cmd, hdr);
1376 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1377 uint32_t read_stag, uint64_t read_va,
1378 uint32_t write_stag, uint64_t write_va)
1380 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1381 struct iscsi_conn *conn = isert_conn->conn;
1382 struct iscsi_session *sess = conn->sess;
1383 struct iscsi_cmd *cmd;
1384 struct isert_cmd *isert_cmd;
1386 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1388 if (sess->sess_ops->SessionType &&
1389 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1390 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1391 " ignoring\n", opcode);
1396 case ISCSI_OP_SCSI_CMD:
1397 cmd = isert_allocate_cmd(conn);
1401 isert_cmd = iscsit_priv_cmd(cmd);
1402 isert_cmd->read_stag = read_stag;
1403 isert_cmd->read_va = read_va;
1404 isert_cmd->write_stag = write_stag;
1405 isert_cmd->write_va = write_va;
1407 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1408 rx_desc, (unsigned char *)hdr);
1410 case ISCSI_OP_NOOP_OUT:
1411 cmd = isert_allocate_cmd(conn);
1415 isert_cmd = iscsit_priv_cmd(cmd);
1416 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1417 rx_desc, (unsigned char *)hdr);
1419 case ISCSI_OP_SCSI_DATA_OUT:
1420 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1421 (unsigned char *)hdr);
1423 case ISCSI_OP_SCSI_TMFUNC:
1424 cmd = isert_allocate_cmd(conn);
1428 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1429 (unsigned char *)hdr);
1431 case ISCSI_OP_LOGOUT:
1432 cmd = isert_allocate_cmd(conn);
1436 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1439 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
1440 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1444 cmd = isert_allocate_cmd(conn);
1449 isert_cmd = iscsit_priv_cmd(cmd);
1450 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1451 rx_desc, (struct iscsi_text *)hdr);
1454 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1463 isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1465 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1466 uint64_t read_va = 0, write_va = 0;
1467 uint32_t read_stag = 0, write_stag = 0;
	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
1472 if (iser_hdr->flags & ISER_RSV) {
1473 read_stag = be32_to_cpu(iser_hdr->read_stag);
1474 read_va = be64_to_cpu(iser_hdr->read_va);
1475 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1476 read_stag, (unsigned long long)read_va);
1478 if (iser_hdr->flags & ISER_WSV) {
1479 write_stag = be32_to_cpu(iser_hdr->write_stag);
1480 write_va = be64_to_cpu(iser_hdr->write_va);
1481 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1482 write_stag, (unsigned long long)write_va);
1485 isert_dbg("ISER ISCSI_CTRL PDU\n");
1488 isert_err("iSER Hello message\n");
1491 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1495 rc = isert_rx_opcode(isert_conn, rx_desc,
1496 read_stag, read_va, write_stag, write_va);
1500 isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1503 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1504 struct iscsi_hdr *hdr;
1506 int rx_buflen, outstanding;
1508 if ((char *)desc == isert_conn->login_req_buf) {
1509 rx_dma = isert_conn->login_req_dma;
1510 rx_buflen = ISER_RX_LOGIN_SIZE;
1511 isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1514 rx_dma = desc->dma_addr;
1515 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1516 isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1520 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1522 hdr = &desc->iscsi_header;
1523 isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1524 hdr->opcode, hdr->itt, hdr->flags,
1525 (int)(xfer_len - ISER_HEADERS_LEN));
1527 if ((char *)desc == isert_conn->login_req_buf) {
1528 isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
1529 if (isert_conn->conn) {
1530 struct iscsi_login *login = isert_conn->conn->conn_login;
1532 if (login && !login->first_request)
1533 isert_rx_login_req(isert_conn);
1535 mutex_lock(&isert_conn->conn_mutex);
1536 complete(&isert_conn->login_req_comp);
1537 mutex_unlock(&isert_conn->conn_mutex);
1539 isert_rx_do_work(desc, isert_conn);
1542 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1545 isert_conn->post_recv_buf_count--;
1546 isert_dbg("Decremented post_recv_buf_count: %d\n",
1547 isert_conn->post_recv_buf_count);
1549 if ((char *)desc == isert_conn->login_req_buf)
1552 outstanding = isert_conn->post_recv_buf_count;
1553 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1554 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1555 ISERT_MIN_POSTED_RX);
1556 err = isert_post_recv(isert_conn, count);
1558 isert_err("isert_post_recv() count: %d failed, %d\n",
1565 isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1566 struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1567 enum iser_ib_op_code op, struct isert_data_buf *data)
1569 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1571 data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1572 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1574 data->len = length - offset;
1575 data->offset = offset;
1576 data->sg_off = data->offset / PAGE_SIZE;
1578 data->sg = &sg[data->sg_off];
1579 data->nents = min_t(unsigned int, nents - data->sg_off,
1580 ISCSI_ISER_SG_TABLESIZE);
1581 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1584 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1586 if (unlikely(!data->dma_nents)) {
1587 isert_err("Cmd: unable to dma map SGs %p\n", sg);
1591 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1592 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1598 isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1600 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1602 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1603 memset(data, 0, sizeof(*data));
1609 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1611 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1613 isert_dbg("Cmd %p\n", isert_cmd);
1616 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1617 isert_unmap_data_buf(isert_conn, &wr->data);
1621 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
1627 isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
1634 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1636 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1637 LIST_HEAD(unmap_list);
1639 isert_dbg("Cmd %p\n", isert_cmd);
1642 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
1643 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1644 isert_unmap_data_buf(isert_conn, &wr->prot);
1645 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1647 spin_lock_bh(&isert_conn->conn_lock);
1648 list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
1649 spin_unlock_bh(&isert_conn->conn_lock);
1654 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1655 isert_unmap_data_buf(isert_conn, &wr->data);
1663 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1665 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1666 struct isert_conn *isert_conn = isert_cmd->conn;
1667 struct iscsi_conn *conn = isert_conn->conn;
1668 struct isert_device *device = isert_conn->conn_device;
1669 struct iscsi_text_rsp *hdr;
1671 isert_dbg("Cmd %p\n", isert_cmd);
1673 switch (cmd->iscsi_opcode) {
1674 case ISCSI_OP_SCSI_CMD:
1675 spin_lock_bh(&conn->cmd_lock);
1676 if (!list_empty(&cmd->i_conn_node))
1677 list_del_init(&cmd->i_conn_node);
1678 spin_unlock_bh(&conn->cmd_lock);
1680 if (cmd->data_direction == DMA_TO_DEVICE) {
1681 iscsit_stop_dataout_timer(cmd);
1683 * Check for special case during comp_err where
1684 * WRITE_PENDING has been handed off from core,
1685 * but requires an extra target_put_sess_cmd()
1686 * before transport_generic_free_cmd() below.
1689 cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1690 struct se_cmd *se_cmd = &cmd->se_cmd;
1692 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1696 device->unreg_rdma_mem(isert_cmd, isert_conn);
1697 transport_generic_free_cmd(&cmd->se_cmd, 0);
1699 case ISCSI_OP_SCSI_TMFUNC:
1700 spin_lock_bh(&conn->cmd_lock);
1701 if (!list_empty(&cmd->i_conn_node))
1702 list_del_init(&cmd->i_conn_node);
1703 spin_unlock_bh(&conn->cmd_lock);
1705 transport_generic_free_cmd(&cmd->se_cmd, 0);
1707 case ISCSI_OP_REJECT:
1708 case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1711 /* If the continue bit is on, keep the command alive */
1712 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1715 spin_lock_bh(&conn->cmd_lock);
1716 if (!list_empty(&cmd->i_conn_node))
1717 list_del_init(&cmd->i_conn_node);
1718 spin_unlock_bh(&conn->cmd_lock);
1721 * Handle special case for REJECT when iscsi_add_reject*() has
1722 * overwritten the original iscsi_opcode assignment, and the
1723 * associated cmd->se_cmd needs to be released.
1725 if (cmd->se_cmd.se_tfo != NULL) {
1726 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1728 transport_generic_free_cmd(&cmd->se_cmd, 0);
1735 iscsit_release_cmd(cmd);
1741 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1743 if (tx_desc->dma_addr != 0) {
1744 isert_dbg("unmap single for tx_desc->dma_addr\n");
1745 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1746 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1747 tx_desc->dma_addr = 0;
1752 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1753 struct ib_device *ib_dev, bool comp_err)
1755 if (isert_cmd->pdu_buf_dma != 0) {
1756 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1757 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1758 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1759 isert_cmd->pdu_buf_dma = 0;
1762 isert_unmap_tx_desc(tx_desc, ib_dev);
1763 isert_put_cmd(isert_cmd, comp_err);
1767 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1769 struct ib_mr_status mr_status;
1772 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1774 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1775 goto fail_mr_status;
1778 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
1780 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
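		/* +8: each logical block carries an 8-byte T10-DIF tuple */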
1782 switch (mr_status.sig_err.err_type) {
1783 case IB_SIG_BAD_GUARD:
1784 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1786 case IB_SIG_BAD_REFTAG:
1787 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1789 case IB_SIG_BAD_APPTAG:
1790 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1793 sec_offset_err = mr_status.sig_err.sig_err_offset;
1794 do_div(sec_offset_err, block_size);
1795 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1797 isert_err("PI error found type %d at sector 0x%llx "
1798 "expected 0x%x vs actual 0x%x\n",
1799 mr_status.sig_err.err_type,
1800 (unsigned long long)se_cmd->bad_sector,
1801 mr_status.sig_err.expected,
1802 mr_status.sig_err.actual);
1811 isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
1812 struct isert_cmd *isert_cmd)
1814 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1815 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1816 struct se_cmd *se_cmd = &cmd->se_cmd;
1817 struct isert_conn *isert_conn = isert_cmd->conn;
1818 struct isert_device *device = isert_conn->conn_device;
1821 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
1822 ret = isert_check_pi_status(se_cmd,
1823 wr->fr_desc->pi_ctx->sig_mr);
1824 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1827 device->unreg_rdma_mem(isert_cmd, isert_conn);
1828 wr->send_wr_num = 0;
1830 transport_send_check_condition_and_sense(se_cmd,
1833 isert_put_response(isert_conn->conn, cmd);
1837 isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1838 struct isert_cmd *isert_cmd)
1840 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1841 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1842 struct se_cmd *se_cmd = &cmd->se_cmd;
1843 struct isert_conn *isert_conn = isert_cmd->conn;
1844 struct isert_device *device = isert_conn->conn_device;
1847 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
1848 ret = isert_check_pi_status(se_cmd,
1849 wr->fr_desc->pi_ctx->sig_mr);
1850 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1853 iscsit_stop_dataout_timer(cmd);
1854 device->unreg_rdma_mem(isert_cmd, isert_conn);
1855 cmd->write_data_done = wr->data.len;
1856 wr->send_wr_num = 0;
1858 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1859 spin_lock_bh(&cmd->istate_lock);
1860 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1861 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1862 spin_unlock_bh(&cmd->istate_lock);
1865 transport_send_check_condition_and_sense(se_cmd,
1868 target_execute_cmd(se_cmd);
1872 isert_do_control_comp(struct work_struct *work)
1874 struct isert_cmd *isert_cmd = container_of(work,
1875 struct isert_cmd, comp_work);
1876 struct isert_conn *isert_conn = isert_cmd->conn;
1877 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1878 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1880 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
1882 switch (cmd->i_state) {
1883 case ISTATE_SEND_TASKMGTRSP:
1884 iscsit_tmr_post_handler(cmd, cmd->conn);
1885 case ISTATE_SEND_REJECT: /* FALLTHRU */
1886 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
1887 cmd->i_state = ISTATE_SENT_STATUS;
1888 isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
1891 case ISTATE_SEND_LOGOUTRSP:
1892 iscsit_logout_post_handler(cmd, cmd->conn);
1895 isert_err("Unknown i_state %d\n", cmd->i_state);
1902 isert_response_completion(struct iser_tx_desc *tx_desc,
1903 struct isert_cmd *isert_cmd,
1904 struct isert_conn *isert_conn,
1905 struct ib_device *ib_dev)
1907 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1909 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1910 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1911 cmd->i_state == ISTATE_SEND_REJECT ||
1912 cmd->i_state == ISTATE_SEND_TEXTRSP) {
1913 isert_unmap_tx_desc(tx_desc, ib_dev);
1915 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1916 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1920 cmd->i_state = ISTATE_SENT_STATUS;
1921 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1925 isert_send_completion(struct iser_tx_desc *tx_desc,
1926 struct isert_conn *isert_conn)
1928 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1929 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1930 struct isert_rdma_wr *wr;
1933 isert_unmap_tx_desc(tx_desc, ib_dev);
1936 wr = &isert_cmd->rdma_wr;
1938 isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
1940 switch (wr->iser_ib_op) {
1942 isert_err("Got ISER_IB_RECV\n");
1946 isert_response_completion(tx_desc, isert_cmd,
1947 isert_conn, ib_dev);
1949 case ISER_IB_RDMA_WRITE:
1950 isert_completion_rdma_write(tx_desc, isert_cmd);
1952 case ISER_IB_RDMA_READ:
1953 isert_completion_rdma_read(tx_desc, isert_cmd);
1956 isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
1963 * is_isert_tx_desc() - Indicate if the completion wr_id
1964 * is a TX descriptor or not.
1965 * @isert_conn: iser connection
1966 * @wr_id: completion WR identifier
1968 * Since we cannot rely on wc opcode in FLUSH errors
1969 * we must work around it by checking if the wr_id address
1970 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
1974 is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
1976 void *start = isert_conn->conn_rx_descs;
1977 int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
1979 if (wr_id >= start && wr_id < start + len)
1986 isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
1988 if (wc->wr_id == ISER_BEACON_WRID) {
1989 isert_info("conn %p completing conn_wait_comp_err\n",
1991 complete(&isert_conn->conn_wait_comp_err);
1992 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
1993 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1994 struct isert_cmd *isert_cmd;
1995 struct iser_tx_desc *desc;
1997 desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
1998 isert_cmd = desc->isert_cmd;
2000 isert_unmap_tx_desc(desc, ib_dev);
2002 isert_completion_put(desc, isert_cmd, ib_dev, true);
2004 isert_conn->post_recv_buf_count--;
2005 if (!isert_conn->post_recv_buf_count)
2006 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2011 isert_handle_wc(struct ib_wc *wc)
2013 struct isert_conn *isert_conn;
2014 struct iser_tx_desc *tx_desc;
2015 struct iser_rx_desc *rx_desc;
2017 isert_conn = wc->qp->qp_context;
2018 if (likely(wc->status == IB_WC_SUCCESS)) {
2019 if (wc->opcode == IB_WC_RECV) {
2020 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
2021 isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
2023 tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2024 isert_send_completion(tx_desc, isert_conn);
2027 if (wc->status != IB_WC_WR_FLUSH_ERR)
2028 isert_err("wr id %llx status %d vend_err %x\n",
2029 wc->wr_id, wc->status, wc->vendor_err);
2031 isert_dbg("flush error: wr id %llx\n", wc->wr_id);
2033 if (wc->wr_id != ISER_FASTREG_LI_WRID)
2034 isert_cq_comp_err(isert_conn, wc);
2039 isert_cq_work(struct work_struct *work)
2041 enum { isert_poll_budget = 65536 };
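	/*
	 * Bound the work done per invocation so one busy CQ cannot
	 * monopolize the shared completion workqueue.
	 */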
2042 struct isert_comp *comp = container_of(work, struct isert_comp,
2044 struct ib_wc *const wcs = comp->wcs;
2045 int i, n, completed = 0;
2047 while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
2048 for (i = 0; i < n; i++)
2049 isert_handle_wc(&wcs[i]);
2052 if (completed >= isert_poll_budget)
2056 ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
2060 isert_cq_callback(struct ib_cq *cq, void *context)
2062 struct isert_comp *comp = context;
2064 queue_work(isert_comp_wq, &comp->work);
2068 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2070 struct ib_send_wr *wr_failed;
2073 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
2076 isert_err("ib_post_send failed with %d\n", ret);
2083 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2085 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2086 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2087 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2088 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
2089 &isert_cmd->tx_desc.iscsi_header;
2091 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2092 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
2093 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2095 * Attach SENSE DATA payload to iSCSI Response PDU
2097 if (cmd->se_cmd.sense_buffer &&
2098 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
2099 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
2100 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2101 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2102 u32 padding, pdu_len;
2104 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
2106 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
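		/*
		 * -(len) & 3 yields the pad bytes needed to round the
		 * sense payload up to the 4-byte boundary iSCSI requires.
		 */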
2108 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
2109 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
2110 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
2112 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2113 (void *)cmd->sense_buffer, pdu_len,
2116 isert_cmd->pdu_buf_len = pdu_len;
2117 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2118 tx_dsg->length = pdu_len;
2119 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2120 isert_cmd->tx_desc.num_sge = 2;
2123 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2125 isert_dbg("Posting SCSI Response\n");
2127 return isert_post_response(isert_conn, isert_cmd);
2131 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2133 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2134 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2135 struct isert_device *device = isert_conn->conn_device;
2137 spin_lock_bh(&conn->cmd_lock);
2138 if (!list_empty(&cmd->i_conn_node))
2139 list_del_init(&cmd->i_conn_node);
2140 spin_unlock_bh(&conn->cmd_lock);
2142 if (cmd->data_direction == DMA_TO_DEVICE)
2143 iscsit_stop_dataout_timer(cmd);
2145 device->unreg_rdma_mem(isert_cmd, isert_conn);
2148 static enum target_prot_op
2149 isert_get_sup_prot_ops(struct iscsi_conn *conn)
2151 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2152 struct isert_device *device = isert_conn->conn_device;
2154 if (conn->tpg->tpg_attrib.t10_pi) {
2155 if (device->pi_capable) {
2156 isert_info("conn %p PI offload enabled\n", isert_conn);
2157 isert_conn->pi_support = true;
2158 return TARGET_PROT_ALL;
2162 isert_info("conn %p PI offload disabled\n", isert_conn);
2163 isert_conn->pi_support = false;
2165 return TARGET_PROT_NORMAL;
2169 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2170 bool nopout_response)
2172 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2173 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2174 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2176 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2177 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2178 &isert_cmd->tx_desc.iscsi_header,
2180 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2181 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2183 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
2185 return isert_post_response(isert_conn, isert_cmd);
2189 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2191 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2192 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2193 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2195 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2196 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2197 &isert_cmd->tx_desc.iscsi_header);
2198 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2199 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2201 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
2203 return isert_post_response(isert_conn, isert_cmd);
2207 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2209 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2210 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2211 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2213 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2214 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2215 &isert_cmd->tx_desc.iscsi_header);
2216 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2217 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2219 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
2221 return isert_post_response(isert_conn, isert_cmd);
2225 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2227 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2228 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2229 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2230 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2231 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2232 struct iscsi_reject *hdr =
2233 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
2235 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2236 iscsit_build_reject(cmd, conn, hdr);
2237 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2239 hton24(hdr->dlength, ISCSI_HDR_LEN);
2240 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2241 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, DMA_TO_DEVICE);
2243 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2244 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2245 tx_dsg->length = ISCSI_HDR_LEN;
2246 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2247 isert_cmd->tx_desc.num_sge = 2;
2249 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2251 isert_dbg("conn %p Posting Reject\n", isert_conn);
2253 return isert_post_response(isert_conn, isert_cmd);
2257 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2259 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2260 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2261 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2262 struct iscsi_text_rsp *hdr =
2263 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2267 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2268 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
2273 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2276 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2277 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2278 void *txt_rsp_buf = cmd->buf_ptr;
2280 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2281 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2283 isert_cmd->pdu_buf_len = txt_rsp_len;
2284 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2285 tx_dsg->length = txt_rsp_len;
2286 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2287 isert_cmd->tx_desc.num_sge = 2;
2289 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2291 isert_dbg("conn %p Text Response\n", isert_conn);
2293 return isert_post_response(isert_conn, isert_cmd);
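/*
 * Build a single RDMA work request covering up to max_sge scatterlist
 * entries starting at @offset. page_off accounts for a transfer that
 * resumes in the middle of the first SG entry, and each ib_sge length
 * is clipped against the remaining @data_left. The return value (the
 * number of SGEs consumed) lets the caller advance its ib_sge cursor
 * for the next WR.
 */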
2297 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2298 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
2299 u32 data_left, u32 offset)
2301 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2302 struct scatterlist *sg_start, *tmp_sg;
2303 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2304 u32 sg_off, page_off;
2305 int i = 0, sg_nents;
2307 sg_off = offset / PAGE_SIZE;
2308 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2309 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2310 page_off = offset % PAGE_SIZE;
2312 send_wr->sg_list = ib_sge;
2313 send_wr->num_sge = sg_nents;
2314 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
2316 * Map each TCM scatterlist entry into an ib_sge dma_addr/length pair.
2318 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2319 isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
2321 (unsigned long long)tmp_sg->dma_address,
2322 tmp_sg->length, page_off);
2324 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2325 ib_sge->length = min_t(u32, data_left,
2326 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2327 ib_sge->lkey = isert_conn->conn_mr->lkey;
2329 isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
2330 ib_sge->addr, ib_sge->length, ib_sge->lkey);
2332 data_left -= ib_sge->length;
2334 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
2337 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2338 send_wr->sg_list, send_wr->num_sge);
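/*
 * isert_map_rdma() - non-fastreg RDMA path: DMA-map the full se_cmd
 * data scatterlist and split it into a chain of RDMA READ/WRITE WRs.
 * For RDMA_WRITE the last WR links to the response send WR so data and
 * status can be posted as one chain; for RDMA_READ the last WR is
 * signaled instead.
 */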
2344 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2345 struct isert_rdma_wr *wr)
2347 struct se_cmd *se_cmd = &cmd->se_cmd;
2348 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2349 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2350 struct isert_data_buf *data = &wr->data;
2351 struct ib_send_wr *send_wr;
2352 struct ib_sge *ib_sge;
2353 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2354 int ret = 0, i, ib_sge_cnt;
2356 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2358 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2359 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2360 se_cmd->t_data_nents, se_cmd->data_length,
2361 offset, wr->iser_ib_op, &wr->data);
2365 data_left = data->len;
2366 offset = data->offset;
2368 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
2370 isert_warn("Unable to allocate ib_sge\n");
2374 wr->ib_sge = ib_sge;
2376 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
2377 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2380 isert_dbg("Unable to allocate wr->send_wr\n");
2385 wr->isert_cmd = isert_cmd;
2386 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2388 for (i = 0; i < wr->send_wr_num; i++) {
2389 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2390 data_len = min(data_left, rdma_write_max);
2392 send_wr->send_flags = 0;
2393 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2394 send_wr->opcode = IB_WR_RDMA_WRITE;
2395 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2396 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2397 if (i + 1 == wr->send_wr_num)
2398 send_wr->next = &isert_cmd->tx_desc.send_wr;
2400 send_wr->next = &wr->send_wr[i + 1];
2402 send_wr->opcode = IB_WR_RDMA_READ;
2403 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2404 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2405 if (i + 1 == wr->send_wr_num)
2406 send_wr->send_flags = IB_SEND_SIGNALED;
2408 send_wr->next = &wr->send_wr[i + 1];
2411 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2412 send_wr, data_len, offset);
2413 ib_sge += ib_sge_cnt;
2416 va_offset += data_len;
2417 data_left -= data_len;
2422 isert_unmap_data_buf(isert_conn, data);
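/*
 * Translate a DMA-mapped scatterlist into a fast-registration page
 * list: physically contiguous entries are coalesced into chunks, and
 * each chunk is emitted one PAGE_SIZE-aligned address at a time into
 * @fr_pl. Returns the number of pages written.
 */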
2428 isert_map_fr_pagelist(struct ib_device *ib_dev,
2429 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2431 u64 start_addr, end_addr, page, chunk_start = 0;
2432 struct scatterlist *tmp_sg;
2433 int i = 0, new_chunk, last_ent, n_pages;
2437 last_ent = sg_nents - 1;
2438 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2439 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2441 chunk_start = start_addr;
2442 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2444 isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
2445 i, (unsigned long long)tmp_sg->dma_address, tmp_sg->length);
2448 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2454 page = chunk_start & PAGE_MASK;
2456 fr_pl[n_pages++] = page;
2457 isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n", n_pages - 1, page);
2459 page += PAGE_SIZE;
2460 } while (page < end_addr);
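/*
 * Invalidate a stale registration before reuse: post IB_WR_LOCAL_INV
 * for the MR's current rkey and bump the key via ib_inc_rkey() so the
 * next fast registration uses a fresh rkey value.
 */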
2467 isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
2471 memset(inv_wr, 0, sizeof(*inv_wr));
2472 inv_wr->wr_id = ISER_FASTREG_LI_WRID;
2473 inv_wr->opcode = IB_WR_LOCAL_INV;
2474 inv_wr->ex.invalidate_rkey = mr->rkey;
2477 rkey = ib_inc_rkey(mr->rkey);
2478 ib_update_fast_reg_key(mr, rkey);
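/*
 * isert_fast_reg_mr() - register @mem behind a single SGE. A transfer
 * that maps to one DMA entry can use the local lkey directly with no
 * registration; otherwise pick the data or protection MR/page-list
 * (based on @ind), optionally chain a local invalidate first, and post
 * an IB_WR_FAST_REG_MR over the page list built above.
 */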
2482 isert_fast_reg_mr(struct isert_conn *isert_conn,
2483 struct fast_reg_descriptor *fr_desc,
2484 struct isert_data_buf *mem,
2485 enum isert_indicator ind,
2488 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2490 struct ib_fast_reg_page_list *frpl;
2491 struct ib_send_wr fr_wr, inv_wr;
2492 struct ib_send_wr *bad_wr, *wr = NULL;
2493 int ret, pagelist_len;
2496 if (mem->dma_nents == 1) {
2497 sge->lkey = isert_conn->conn_mr->lkey;
2498 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2499 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
2500 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2501 sge->addr, sge->length, sge->lkey);
2505 if (ind == ISERT_DATA_KEY_VALID) {
2506 /* Registering data buffer */
2507 mr = fr_desc->data_mr;
2508 frpl = fr_desc->data_frpl;
2510 /* Registering protection buffer */
2511 mr = fr_desc->pi_ctx->prot_mr;
2512 frpl = fr_desc->pi_ctx->prot_frpl;
2515 page_off = mem->offset % PAGE_SIZE;
2517 isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
2518 fr_desc, mem->nents, mem->offset);
2520 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
2521 &frpl->page_list[0]);
2523 if (!(fr_desc->ind & ind)) {
2524 isert_inv_rkey(&inv_wr, mr);
2528 /* Prepare FASTREG WR */
2529 memset(&fr_wr, 0, sizeof(fr_wr));
2530 fr_wr.wr_id = ISER_FASTREG_LI_WRID;
2531 fr_wr.opcode = IB_WR_FAST_REG_MR;
2532 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
2533 fr_wr.wr.fast_reg.page_list = frpl;
2534 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2535 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2536 fr_wr.wr.fast_reg.length = mem->len;
2537 fr_wr.wr.fast_reg.rkey = mr->rkey;
2538 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2545 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2547 isert_err("fast registration failed, ret:%d\n", ret);
2550 fr_desc->ind &= ~ind;
2552 sge->lkey = mr->lkey;
2553 sge->addr = frpl->page_list[0] + page_off;
2554 sge->length = mem->len;
2556 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2557 sge->addr, sge->length, sge->lkey);
2563 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2564 struct ib_sig_domain *domain)
2566 domain->sig_type = IB_SIG_TYPE_T10_DIF;
2567 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2568 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2569 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
2571 * At the moment we hard-code these values, but if in the future the
2572 * target core wants to use them, we will take them from se_cmd.
2575 domain->sig.dif.apptag_check_mask = 0xffff;
2576 domain->sig.dif.app_escape = true;
2577 domain->sig.dif.ref_escape = true;
2578 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2579 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2580 domain->sig.dif.ref_remap = true;
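/*
 * Map the target-core protection operation onto signature domains:
 * INSERT/STRIP operations leave one side (memory or wire) without a
 * DIF domain, while the PASS operations carry protection information
 * on both domains.
 */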
2584 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2586 switch (se_cmd->prot_op) {
2587 case TARGET_PROT_DIN_INSERT:
2588 case TARGET_PROT_DOUT_STRIP:
2589 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2590 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2592 case TARGET_PROT_DOUT_INSERT:
2593 case TARGET_PROT_DIN_STRIP:
2594 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2595 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2597 case TARGET_PROT_DIN_PASS:
2598 case TARGET_PROT_DOUT_PASS:
2599 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2600 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2603 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
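/*
 * Convert TARGET_DIF_CHECK_* flags into the check_mask nibbles the HCA
 * consumes. Note that TARGET_DIF_CHECK_REFTAG is tested for both the
 * 0x30 and 0x0f fields here.
 */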
2611 isert_set_prot_checks(u8 prot_checks)
2613 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2614 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2615 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
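/*
 * isert_reg_sig_mr() - wrap the already-registered data (and optional
 * protection) SGEs in a signature MR via IB_WR_REG_SIG_MR. The
 * resulting ib_sg[SIG] spans data_length, extended by prot_length when
 * guards also travel on the wire.
 */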
2619 isert_reg_sig_mr(struct isert_conn *isert_conn,
2620 struct se_cmd *se_cmd,
2621 struct isert_rdma_wr *rdma_wr,
2622 struct fast_reg_descriptor *fr_desc)
2624 struct ib_send_wr sig_wr, inv_wr;
2625 struct ib_send_wr *bad_wr, *wr = NULL;
2626 struct pi_context *pi_ctx = fr_desc->pi_ctx;
2627 struct ib_sig_attrs sig_attrs;
2630 memset(&sig_attrs, 0, sizeof(sig_attrs));
2631 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2635 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
2637 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
2638 isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
2642 memset(&sig_wr, 0, sizeof(sig_wr));
2643 sig_wr.opcode = IB_WR_REG_SIG_MR;
2644 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
2645 sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
2646 sig_wr.num_sge = 1;
2647 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2648 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2649 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2650 if (se_cmd->t_prot_sg)
2651 sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
2658 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2660 isert_err("fast registration failed, ret:%d\n", ret);
2663 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2665 rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2666 rdma_wr->ib_sg[SIG].addr = 0;
2667 rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
2668 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2669 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2671 * We have protection guards on the wire,
2672 * so we need to set a larger transfer
2674 rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
2676 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
2677 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
2678 rdma_wr->ib_sg[SIG].lkey);
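/*
 * For protected commands: lazily allocate the descriptor's pi_context
 * on first use, map and fast-register the protection scatterlist when
 * one exists, then hand both regions to isert_reg_sig_mr() and mark
 * the descriptor ISERT_PROTECTED.
 */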
2684 isert_handle_prot_cmd(struct isert_conn *isert_conn,
2685 struct isert_cmd *isert_cmd,
2686 struct isert_rdma_wr *wr)
2688 struct isert_device *device = isert_conn->conn_device;
2689 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2692 if (!wr->fr_desc->pi_ctx) {
2693 ret = isert_create_pi_ctx(wr->fr_desc, isert_conn->conn_cm_id->device,
2695 isert_conn->conn_pd);
2697 isert_err("conn %p failed to allocate pi_ctx\n", isert_conn);
2703 if (se_cmd->t_prot_sg) {
2704 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_prot_sg,
2706 se_cmd->t_prot_nents,
2707 se_cmd->prot_length,
2708 0, wr->iser_ib_op, &wr->prot);
2710 isert_err("conn %p failed to map protection buffer\n", isert_conn);
2715 memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
2716 ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
2717 ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
2719 isert_err("conn %p failed to fast reg mr\n", isert_conn);
2721 goto unmap_prot_cmd;
2725 ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
2727 isert_err("conn %p failed to fast reg mr\n", isert_conn);
2729 goto unmap_prot_cmd;
2731 wr->fr_desc->ind |= ISERT_PROTECTED;
2736 if (se_cmd->t_prot_sg)
2737 isert_unmap_data_buf(isert_conn, &wr->prot);
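/*
 * isert_reg_rdma() - fast-registration RDMA path. Single-SGE,
 * non-protected transfers use the local DMA lkey directly; everything
 * else takes a fast_reg_descriptor from the connection pool and
 * collapses the transfer into one registered SGE, so only a single
 * send WR is needed regardless of scatterlist size.
 */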
2743 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2744 struct isert_rdma_wr *wr)
2746 struct se_cmd *se_cmd = &cmd->se_cmd;
2747 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2748 struct isert_conn *isert_conn = conn->context;
2749 struct fast_reg_descriptor *fr_desc = NULL;
2750 struct ib_send_wr *send_wr;
2751 struct ib_sge *ib_sg;
2754 unsigned long flags;
2756 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2758 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2759 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2760 se_cmd->t_data_nents, se_cmd->data_length,
2761 offset, wr->iser_ib_op, &wr->data);
2765 if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
2766 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2767 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2768 struct fast_reg_descriptor, list);
2769 list_del(&fr_desc->list);
2770 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2771 wr->fr_desc = fr_desc;
2774 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
2775 ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
2779 if (isert_prot_cmd(isert_conn, se_cmd)) {
2780 ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
2784 ib_sg = &wr->ib_sg[SIG];
2786 ib_sg = &wr->ib_sg[DATA];
2789 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
2790 wr->ib_sge = &wr->s_ib_sge;
2791 wr->send_wr_num = 1;
2792 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2793 wr->send_wr = &wr->s_send_wr;
2794 wr->isert_cmd = isert_cmd;
2796 send_wr = &isert_cmd->rdma_wr.s_send_wr;
2797 send_wr->sg_list = &wr->s_ib_sge;
2798 send_wr->num_sge = 1;
2799 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
2800 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2801 send_wr->opcode = IB_WR_RDMA_WRITE;
2802 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2803 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2804 send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
2805 0 : IB_SEND_SIGNALED;
2807 send_wr->opcode = IB_WR_RDMA_READ;
2808 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2809 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2810 send_wr->send_flags = IB_SEND_SIGNALED;
2817 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2818 list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
2819 spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
2821 isert_unmap_data_buf(isert_conn, &wr->data);
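/*
 * isert_put_datain() - post the RDMA_WRITE chain that returns read
 * data to the initiator. Without PI offload the SCSI response PDU is
 * appended to the same chain; with PI offload the response is posted
 * separately, presumably once the signature MR status has been checked
 * in the completion path (not shown here).
 */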
2827 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2829 struct se_cmd *se_cmd = &cmd->se_cmd;
2830 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2831 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2832 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2833 struct isert_device *device = isert_conn->conn_device;
2834 struct ib_send_wr *wr_failed;
2837 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2838 isert_cmd, se_cmd->data_length);
2840 wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2841 rc = device->reg_rdma_mem(conn, cmd, wr);
2843 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2847 if (!isert_prot_cmd(isert_conn, se_cmd)) {
2849 * Build isert_cmd->tx_desc for the iSCSI response PDU and attach it to the end of the RDMA_WRITE chain.
2851 isert_create_send_desc(isert_conn, isert_cmd,
2852 &isert_cmd->tx_desc);
2853 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2854 &isert_cmd->tx_desc.iscsi_header);
2855 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2856 isert_init_send_wr(isert_conn, isert_cmd,
2857 &isert_cmd->tx_desc.send_wr);
2858 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2859 wr->send_wr_num += 1;
2862 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2864 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2866 if (!isert_prot_cmd(isert_conn, se_cmd))
2867 isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
2868 "READ\n", isert_cmd);
2870 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
2877 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2879 struct se_cmd *se_cmd = &cmd->se_cmd;
2880 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2881 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2882 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2883 struct isert_device *device = isert_conn->conn_device;
2884 struct ib_send_wr *wr_failed;
2887 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2888 isert_cmd, se_cmd->data_length, cmd->write_data_done);
2889 wr->iser_ib_op = ISER_IB_RDMA_READ;
2890 rc = device->reg_rdma_mem(conn, cmd, wr);
2892 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2896 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2898 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2900 isert_dbg("Cmd: %p posted RDMA_READ memory for iSER Data WRITE\n", isert_cmd);
2907 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2912 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2913 ret = isert_put_nopin(cmd, conn, false);
2916 isert_err("Unknown immediate state: 0x%02x\n", state);
2925 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2927 struct isert_conn *isert_conn = conn->context;
2931 case ISTATE_SEND_LOGOUTRSP:
2932 ret = isert_put_logout_rsp(cmd, conn);
2934 isert_conn->logout_posted = true;
2936 case ISTATE_SEND_NOPIN:
2937 ret = isert_put_nopin(cmd, conn, true);
2939 case ISTATE_SEND_TASKMGTRSP:
2940 ret = isert_put_tm_rsp(cmd, conn);
2942 case ISTATE_SEND_REJECT:
2943 ret = isert_put_reject(cmd, conn);
2945 case ISTATE_SEND_TEXTRSP:
2946 ret = isert_put_text_rsp(cmd, conn);
2948 case ISTATE_SEND_STATUS:
2950 * Special case for sending a non-GOOD SCSI status from the TX thread
2951 * context when a failure occurs before se_cmd execution.
2953 ret = isert_put_response(conn, cmd);
2956 isert_err("Unknown response state: 0x%02x\n", state);
2965 isert_setup_id(struct isert_np *isert_np)
2967 struct iscsi_np *np = isert_np->np;
2968 struct rdma_cm_id *id;
2969 struct sockaddr *sa;
2972 sa = (struct sockaddr *)&np->np_sockaddr;
2973 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
2975 id = rdma_create_id(isert_cma_handler, isert_np,
2976 RDMA_PS_TCP, IB_QPT_RC);
2978 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
2982 isert_dbg("id %p context %p\n", id, id->context);
2984 ret = rdma_bind_addr(id, sa);
2986 isert_err("rdma_bind_addr() failed: %d\n", ret);
2990 ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
2992 isert_err("rdma_listen() failed: %d\n", ret);
2998 rdma_destroy_id(id);
3000 return ERR_PTR(ret);
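/*
 * isert_setup_np() - allocate the iSER network portal, copy the
 * listener sockaddr provided by configfs, and create/bind/listen on an
 * RDMA CM ID via isert_setup_id(). The CM ID is stashed in np_cm_id so
 * isert_free_np() can destroy it at shutdown.
 */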
3004 isert_setup_np(struct iscsi_np *np,
3005 struct __kernel_sockaddr_storage *ksockaddr)
3007 struct isert_np *isert_np;
3008 struct rdma_cm_id *isert_lid;
3011 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
3013 isert_err("Unable to allocate struct isert_np\n");
3016 sema_init(&isert_np->np_sem, 0);
3017 mutex_init(&isert_np->np_accept_mutex);
3018 INIT_LIST_HEAD(&isert_np->np_accept_list);
3019 init_completion(&isert_np->np_login_comp);
3023 * Set up np->np_sockaddr from the sockaddr passed in from the
3024 * iscsi_target_configfs.c code.
3026 memcpy(&np->np_sockaddr, ksockaddr,
3027 sizeof(struct __kernel_sockaddr_storage));
3029 isert_lid = isert_setup_id(isert_np);
3030 if (IS_ERR(isert_lid)) {
3031 ret = PTR_ERR(isert_lid);
3035 isert_np->np_cm_id = isert_lid;
3036 np->np_context = isert_np;
3047 isert_rdma_accept(struct isert_conn *isert_conn)
3049 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3050 struct rdma_conn_param cp;
3053 memset(&cp, 0, sizeof(struct rdma_conn_param));
3054 cp.initiator_depth = isert_conn->initiator_depth;
3056 cp.rnr_retry_count = 7;
3058 ret = rdma_accept(cm_id, &cp);
3060 isert_err("rdma_accept() failed with: %d\n", ret);
3068 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3070 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
3073 isert_info("before login_req comp conn: %p\n", isert_conn);
3074 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
3076 isert_err("isert_conn %p interrupted before login request was received\n", isert_conn);
3080 reinit_completion(&isert_conn->login_req_comp);
3083 * For login requests after the first PDU, isert_rx_login_req() will
3084 * kick schedule_delayed_work(&conn->login_work) as the packet is
3085 * received, which turns this callback from iscsi_target_do_login_rx() into a NOP.
3088 if (!login->first_request)
3091 isert_rx_login_req(isert_conn);
3093 isert_info("before conn_login_comp conn: %p\n", conn);
3094 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
3098 isert_info("processing login->req: %p\n", login->req);
3104 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3105 struct isert_conn *isert_conn)
3107 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
3108 struct rdma_route *cm_route = &cm_id->route;
3109 struct sockaddr_in *sock_in;
3110 struct sockaddr_in6 *sock_in6;
3112 conn->login_family = np->np_sockaddr.ss_family;
3114 if (np->np_sockaddr.ss_family == AF_INET6) {
3115 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3116 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3117 &sock_in6->sin6_addr.in6_u);
3118 conn->login_port = ntohs(sock_in6->sin6_port);
3120 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3121 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3122 &sock_in6->sin6_addr.in6_u);
3123 conn->local_port = ntohs(sock_in6->sin6_port);
3125 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3126 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI4",
3127 &sock_in->sin_addr.s_addr);
3128 conn->login_port = ntohs(sock_in->sin_port);
3130 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3131 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI4",
3132 &sock_in->sin_addr.s_addr);
3133 conn->local_port = ntohs(sock_in->sin_port);
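/*
 * isert_accept_np() - block on np_sem until the CM handler queues a
 * new connection, then unhook the first entry from np_accept_list and
 * bind it to the iscsi_conn. Bails out early if the NP thread is in
 * RESET/SHUTDOWN/EXIT.
 */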
3138 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3140 struct isert_np *isert_np = (struct isert_np *)np->np_context;
3141 struct isert_conn *isert_conn;
3142 int max_accept = 0, ret;
3145 ret = down_interruptible(&isert_np->np_sem);
3146 if (ret || max_accept > 5)
3149 spin_lock_bh(&np->np_thread_lock);
3150 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
3151 spin_unlock_bh(&np->np_thread_lock);
3152 isert_dbg("np_thread_state %d\n",
3153 np->np_thread_state);
3155 * No point in stalling here when np_thread
3156 * is in state RESET/SHUTDOWN/EXIT - bail
3160 spin_unlock_bh(&np->np_thread_lock);
3162 mutex_lock(&isert_np->np_accept_mutex);
3163 if (list_empty(&isert_np->np_accept_list)) {
3164 mutex_unlock(&isert_np->np_accept_mutex);
3168 isert_conn = list_first_entry(&isert_np->np_accept_list,
3169 struct isert_conn, conn_accept_node);
3170 list_del_init(&isert_conn->conn_accept_node);
3171 mutex_unlock(&isert_np->np_accept_mutex);
3173 conn->context = isert_conn;
3174 isert_conn->conn = conn;
3177 isert_set_conn_info(np, conn, isert_conn);
3179 isert_dbg("Processing isert_conn: %p\n", isert_conn);
3185 isert_free_np(struct iscsi_np *np)
3187 struct isert_np *isert_np = (struct isert_np *)np->np_context;
3188 struct isert_conn *isert_conn, *n;
3190 if (isert_np->np_cm_id)
3191 rdma_destroy_id(isert_np->np_cm_id);
3194 * FIXME: We have no good way to ensure that, at this point,
3195 * there are no hanging connections that completed RDMA
3196 * establishment but never started the iSCSI login process.
3197 * Work around this by cleaning up whatever has piled up
3198 * in np_accept_list.
3200 mutex_lock(&isert_np->np_accept_mutex);
3201 if (!list_empty(&isert_np->np_accept_list)) {
3202 isert_info("Still have isert connections, cleaning up...\n");
3203 list_for_each_entry_safe(isert_conn, n,
3204 &isert_np->np_accept_list,
3206 isert_info("cleaning isert_conn %p state (%d)\n",
3207 isert_conn, isert_conn->state);
3208 isert_connect_release(isert_conn);
3211 mutex_unlock(&isert_np->np_accept_mutex);
3213 np->np_context = NULL;
3217 static void isert_release_work(struct work_struct *work)
3219 struct isert_conn *isert_conn = container_of(work,
3223 isert_info("Starting release conn %p\n", isert_conn);
3225 wait_for_completion(&isert_conn->conn_wait);
3227 mutex_lock(&isert_conn->conn_mutex);
3228 isert_conn->state = ISER_CONN_DOWN;
3229 mutex_unlock(&isert_conn->conn_mutex);
3231 isert_info("Destroying conn %p\n", isert_conn);
3232 isert_put_conn(isert_conn);
3236 isert_wait4logout(struct isert_conn *isert_conn)
3238 struct iscsi_conn *conn = isert_conn->conn;
3240 isert_info("conn %p\n", isert_conn);
3242 if (isert_conn->logout_posted) {
3243 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
3244 wait_for_completion_timeout(&conn->conn_logout_comp,
3245 SECONDS_FOR_LOGOUT_COMP * HZ);
3250 isert_wait4cmds(struct iscsi_conn *conn)
3252 isert_info("iscsi_conn %p\n", conn);
3255 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
3256 target_wait_for_sess_cmds(conn->sess->se_sess);
3261 isert_wait4flush(struct isert_conn *isert_conn)
3263 struct ib_recv_wr *bad_wr;
3265 isert_info("conn %p\n", isert_conn);
3267 init_completion(&isert_conn->conn_wait_comp_err);
3268 isert_conn->beacon.wr_id = ISER_BEACON_WRID;
3269 /* post an indication that all flush errors were consumed */
3270 if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
3271 isert_err("conn %p failed to post beacon\n", isert_conn);
3275 wait_for_completion(&isert_conn->conn_wait_comp_err);
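/*
 * isert_wait_conn() - orderly connection shutdown: terminate the CM
 * connection, wait for outstanding se_cmds to complete, drain flush
 * errors with the beacon RECV above, wait for any posted logout
 * response, and finally defer the last teardown to isert_release_wq.
 */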
3278 static void isert_wait_conn(struct iscsi_conn *conn)
3280 struct isert_conn *isert_conn = conn->context;
3282 isert_info("Starting conn %p\n", isert_conn);
3284 mutex_lock(&isert_conn->conn_mutex);
3286 * Only wait for conn_wait_comp_err if the isert_conn made it
3287 * into full feature phase.
3289 if (isert_conn->state == ISER_CONN_INIT) {
3290 mutex_unlock(&isert_conn->conn_mutex);
3293 isert_conn_terminate(isert_conn);
3294 mutex_unlock(&isert_conn->conn_mutex);
3296 isert_wait4cmds(conn);
3297 isert_wait4flush(isert_conn);
3298 isert_wait4logout(isert_conn);
3300 INIT_WORK(&isert_conn->release_work, isert_release_work);
3301 queue_work(isert_release_wq, &isert_conn->release_work);
3304 static void isert_free_conn(struct iscsi_conn *conn)
3306 struct isert_conn *isert_conn = conn->context;
3308 isert_put_conn(isert_conn);
3311 static struct iscsit_transport iser_target_transport = {
3313 .transport_type = ISCSI_INFINIBAND,
3314 .priv_size = sizeof(struct isert_cmd),
3315 .owner = THIS_MODULE,
3316 .iscsit_setup_np = isert_setup_np,
3317 .iscsit_accept_np = isert_accept_np,
3318 .iscsit_free_np = isert_free_np,
3319 .iscsit_wait_conn = isert_wait_conn,
3320 .iscsit_free_conn = isert_free_conn,
3321 .iscsit_get_login_rx = isert_get_login_rx,
3322 .iscsit_put_login_tx = isert_put_login_tx,
3323 .iscsit_immediate_queue = isert_immediate_queue,
3324 .iscsit_response_queue = isert_response_queue,
3325 .iscsit_get_dataout = isert_get_dataout,
3326 .iscsit_queue_data_in = isert_put_datain,
3327 .iscsit_queue_status = isert_put_response,
3328 .iscsit_aborted_task = isert_aborted_task,
3329 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
3332 static int __init isert_init(void)
3336 isert_comp_wq = alloc_workqueue("isert_comp_wq",
3337 WQ_UNBOUND | WQ_HIGHPRI, 0);
3338 if (!isert_comp_wq) {
3339 isert_err("Unable to allocate isert_comp_wq\n");
3344 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3345 WQ_UNBOUND_MAX_ACTIVE);
3346 if (!isert_release_wq) {
3347 isert_err("Unable to allocate isert_release_wq\n");
3349 goto destroy_comp_wq;
3352 iscsit_register_transport(&iser_target_transport);
3353 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
3358 destroy_workqueue(isert_comp_wq);
3363 static void __exit isert_exit(void)
3365 flush_scheduled_work();
3366 destroy_workqueue(isert_release_wq);
3367 destroy_workqueue(isert_comp_wq);
3368 iscsit_unregister_transport(&iser_target_transport);
3369 isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
3372 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
3373 MODULE_VERSION("0.1");
3374 MODULE_AUTHOR("nab@Linux-iSCSI.org");
3375 MODULE_LICENSE("GPL");
3377 module_init(isert_init);
3378 module_exit(isert_exit);