/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	return sprintf(buffer, "off");
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
	int tmo = -1, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		return res;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (!res)
		*(int *)kp->arg = tmo;
	return res;
static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
	return (struct srp_target_port *) host->hostdata;

static const char *srp_target_info(struct Scsi_Host *host)
	return host_to_target(host)->target_name;

static int srp_target_is_topspin(struct srp_target_port *target)
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	iu->buf = kzalloc(size, gfp_mask);
	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;
	iu->size      = size;
	iu->direction = direction;

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);

static void srp_qp_event(struct ib_event *event, void *context)
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	attr->qp_state	      = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num	      = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);
static int srp_new_cm_id(struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->scsi_host->can_queue;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
	struct srp_fr_desc *d;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		ib_free_fast_reg_page_list(d->frpl);
/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	int i, ret = -EINVAL;

	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
		list_add_tail(&d->entry, &pool->free_list);
	}

	srp_destroy_fr_pool(pool);
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd,
				  target->scsi_host->can_queue,
				  dev->max_pages_per_mr);
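
/*
 * Sketch of the fast registration descriptor life cycle as implemented by
 * srp_map_finish_fr() and srp_unmap_data() further below (illustrative
 * only):
 *
 *	desc = srp_fr_pool_get(ch->fr_pool);
 *	ib_update_fast_reg_key(desc->mr, ib_inc_rkey(desc->mr->rkey));
 *	... post an IB_WR_FAST_REG_MR work request for desc->mr ...
 *	... use desc->mr->rkey in SRP data descriptors ...
 *	... post an IB_WR_LOCAL_INV work request for desc->mr->rkey ...
 *	srp_fr_pool_put(ch->fr_pool, &desc, 1);
 */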
/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
	struct ib_recv_wr *bad_wr;
	int ret;

	/* Destroying a QP and reusing ch->done is only safe if not connected */
	WARN_ON_ONCE(ch->connected);

	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
	WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);

	init_completion(&ch->done);
	ret = ib_post_recv(ch->qp, &wr, &bad_wr);
	WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
	if (ret == 0)
		wait_for_completion(&ch->done);

	ib_destroy_qp(ch->qp);
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);

	/* + 1 for SRP_LAST_WR_ID */
	cq_attr.cqe = target->queue_size + 1;
	cq_attr.comp_vector = ch->comp_vector;
	recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	cq_attr.cqe = m * target->queue_size;
	cq_attr.comp_vector = ch->comp_vector;
	send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
			       &cq_attr);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);

	ret = srp_init_qp(target, qp);

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	if (ch->recv_cq)
		ib_destroy_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_destroy_cq(ch->send_cq);

	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

err_qp:
	ib_destroy_qp(qp);

err_send_cq:
	ib_destroy_cq(send_cq);

err_recv_cq:
	ib_destroy_cq(recv_cq);
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}
	ib_destroy_cq(ch->send_cq);
	ib_destroy_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after it
	 * has been freed: the SCSI error handler can continue trying to
	 * perform recovery actions after scsi_remove_host() has returned.
	 */
	ch->target = NULL;

	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);

static int srp_lookup_path(struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID. Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
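	/*
	 * Resulting layout of the two 16-byte port identifiers (summary
	 * of the code below):
	 *
	 *	rev. 16a:  initiator port ID = initiator_ext + port GUID
	 *	           target port ID    = id_ext + ioc_guid
	 *	rev. 10:   same identifiers with the two 8-byte halves
	 *	           swapped, i.e. the GUID comes first.
	 */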
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
static bool srp_queue_remove_work(struct srp_target_port *target)
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
static void srp_disconnect_target(struct srp_target_port *target)
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg)
			kfree(req->fr_list);
		else
			kfree(req->fmr_list);
		kfree(req->map_page);
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void **mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
				  GFP_KERNEL);
		if (srp_dev->use_fast_reg)
			req->fr_list = mr_list;
		else
			req->fmr_list = mr_list;
		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
					sizeof(void *), GFP_KERNEL);
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked are ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
static void srp_remove_target(struct srp_target_port *target)
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
static void srp_remove_work(struct work_struct *work)
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);

static void srp_rport_delete(struct srp_rport *rport)
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (ch->status) {
		case 0:
			ch->connected = true;
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ch->status = -ECONNRESET;
			return ch->status;

		default:
			return ch->status;
		}
	}
static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_id		    = LOCAL_INV_WR_ID_MASK,
		.ex.invalidate_rkey = rkey,
	};

	return ib_post_send(ch->qp, &wr, &bad_wr);
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
static void srp_terminate_io(struct srp_rport *rport)
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure this is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end)
		return -ENOMEM;

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

	return 0;
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;

	if (state->fr.next >= state->fr.end)
		return -ENOMEM;

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	memcpy(desc->frpl->page_list, state->pages,
	       sizeof(state->pages[0]) * state->npages);

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.wr_id = FAST_REG_WR_ID_MASK;
	wr.wr.fast_reg.iova_start = state->base_dma_addr;
	wr.wr.fast_reg.page_list = desc->frpl;
	wr.wr.fast_reg.page_list_len = state->npages;
	wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
	wr.wr.fast_reg.length = state->dma_len;
	wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_READ |
				       IB_ACCESS_REMOTE_WRITE);
	wr.wr.fast_reg.rkey = desc->mr->lkey;

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr, state->dma_len,
		     desc->mr->rkey);

	return ib_post_send(ch->qp, &wr, &bad_wr);
static int srp_finish_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	int ret = 0;

	WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && target->global_mr)
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     target->global_mr->rkey);
	else
		ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
			srp_map_finish_fmr(state, ch);

	if (ret == 0) {
		state->npages = 0;
		state->dma_len = 0;
	}

	return ret;
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg, int sg_index)
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;
		if (state->npages == dev->max_pages_per_mr || offset != 0) {
			ret = srp_finish_mapping(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the last entry of the MR wasn't a full page, then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if (len != dev->mr_page_size)
		ret = srp_finish_mapping(state, ch);
	return ret;
static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
		      struct srp_request *req, struct scatterlist *scat,
		      int count)
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i, ret;

	state->desc = req->indirect_desc;
	state->pages = req->map_page;
	if (dev->use_fast_reg) {
		state->fr.next = req->fr_list;
		state->fr.end = req->fr_list + target->cmd_sg_cnt;
	} else if (dev->use_fmr) {
		state->fmr.next = req->fmr_list;
		state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
	}

	if (dev->use_fast_reg || dev->use_fmr) {
		for_each_sg(scat, sg, count, i) {
			ret = srp_map_sg_entry(state, ch, sg, i);
			if (ret)
				goto out;
		}
		ret = srp_finish_mapping(state, ch);
		if (ret)
			goto out;
	} else {
		for_each_sg(scat, sg, count, i) {
			srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
				     ib_sg_dma_len(dev->dev, sg),
				     target->global_mr->rkey);
		}
	}

	req->nmdesc = state->nmdesc;
	ret = 0;

out:
	return ret;
/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory region.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.pages = idb_pages;
	state.pages[0] = (req->indirect_dma_addr &
			  dev->mr_page_mask);
	state.npages = 1;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;
	ret = srp_finish_mapping(&state, ch);
	if (ret < 0)
		return ret;

	*idb_rkey = idb_desc.key;

	return 0;
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1 && target->global_mr) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry. So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->global_mr->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	srp_map_sg(&state, ch, req, scat, count);

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		return -EIO;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!target->global_mr) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			return ret;
	} else {
		idb_rkey = cpu_to_be32(target->global_mr->rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(ch->send_cq, ch);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&ch->lock, flags);

		ch->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			ch->tsk_mgmt_status = rsp->data[3];
		complete(&ch->tsk_mgmt_done);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
			      bool send_err, struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;

	if (wr_id == SRP_LAST_WR_ID) {
		complete(&ch->done);
		return;
	}

	if (ch->connected && !target->qp_in_error) {
		if (wr_id & LOCAL_INV_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "LOCAL_INV failed with status %s (%d)\n",
				     ib_wc_status_msg(wc_status), wc_status);
		} else if (wr_id & FAST_REG_WR_ID_MASK) {
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "FAST_REG_MR failed status %s (%d)\n",
				     ib_wc_status_msg(wc_status), wc_status);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed %s status %s (%d) for iu %p\n",
				     send_err ? "send" : "receive",
				     ib_wc_status_msg(wc_status), wc_status,
				     (void *)(uintptr_t)wr_id);
		}
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
	struct srp_rdma_ch *ch = ch_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			srp_handle_recv(ch, &wc);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
		}
	}

static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_iu *iu;
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (likely(wc.status == IB_WC_SUCCESS)) {
			iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
			list_add(&iu->list, &ch->free_tx);
		} else {
			srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
		}
	}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Make sure that the loops that iterate over the request ring never
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
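
/*
 * Worked example (illustrative): for qp_attr->timeout == 14 and
 * qp_attr->retry_cnt == 7, T_tr_ns = 4096 * 2^14 ns ~= 67 ms, hence
 * max_compl_time_ms ~= 7 * 4 * 67 ms ~= 1879 ms, and the computed
 * timeout becomes msecs_to_jiffies(2879), i.e. about 2.9 seconds.
 */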
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim	  = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
	}

	ret = srp_alloc_iu_bufs(ch);

	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_rdma_ch *ch)
	struct srp_target_port *target = ch->target;
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		ch->path.dlid = cpi->redirect_lid;
		ch->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);

		ch->status = ch->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(ch->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     be64_to_cpu(ch->path.dgid.global.subnet_prefix),
				     be64_to_cpu(ch->path.dgid.global.interface_id));

			ch->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			ch->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost, PFX
					     "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
					     target->sgid.raw,
					     target->orig_dgid.raw, reason);
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		}
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		ch->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		ch->status = -ECONNRESET;
	}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
	struct srp_rdma_ch *ch = cm_id->context;
	struct srp_target_port *target = ch->target;
	int comp = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		ch->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		srp_cm_rep_handler(cm_id, event->private_data, ch);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, ch);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		ch->connected = false;
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		queue_work(system_long_wq, &target->tl_err_work);
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");
		comp = 1;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&ch->done);

	return 0;
/**
 * srp_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns queue depth.
 */
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}
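/*
 * Send an SRP_TSK_MGMT information unit for @req_tag on @ch and wait up
 * to SRP_ABORT_TIMEOUT_MS for the response. The SRP_TAG_TSK_MGMT bit in
 * the IU tag distinguishes task management responses from command
 * responses. Returns 0 on success and -1 on failure or timeout.
 */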
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
			     u8 func)
{
	struct srp_target_port *target = ch->target;
	struct srp_rport *rport = target->rport;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (!ch->connected || target->qp_in_error)
		return -1;

	init_completion(&ch->tsk_mgmt_done);

	/*
	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
	 * invoked while a task management function is being sent.
	 */
	mutex_lock(&rport->mutex);
	spin_lock_irq(&ch->lock);
	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&ch->lock);

	if (!iu) {
		mutex_unlock(&rport->mutex);
		return -1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	int_to_scsilun(lun, &tsk_mgmt->lun);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);
		return -1;
	}
	mutex_unlock(&rport->mutex);

	if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}
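/*
 * SCSI error-handling callback: abort a single command. SUCCESS is
 * returned once the initiator no longer owns the request, FAST_IO_FAIL
 * when the rport has been lost, and FAILED otherwise.
 */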
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	u32 tag;
	u16 ch_idx;
	struct srp_rdma_ch *ch;
	int ret;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req)
		return SUCCESS;
	tag = blk_mq_unique_tag(scmnd->request);
	ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
	ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
	shost_printk(KERN_ERR, target->scsi_host,
		     "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (ch->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
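/*
 * Give disk requests at least as much time as the transport layer needs
 * to detect a dropped connection (rq_tmo_jiffies), with a 30 second
 * floor, so that commands do not time out before a reconnect can happen.
 */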
static int srp_slave_configure(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct srp_target_port *target = host_to_target(shost);
	struct request_queue *q = sdev->request_queue;
	unsigned long timeout;

	if (sdev->type == TYPE_DISK) {
		timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
		blk_queue_rq_timeout(q, timeout);
	}

	return 0;
}
static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
}

static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->sgid.raw);
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch = &target->ch[0];

	return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));
	struct srp_rdma_ch *ch;
	int i, req_lim = INT_MAX;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		req_lim = min(req_lim, ch->req_lim);
	}
	return sprintf(buf, "%d\n", req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->ch_count);
}

static ssize_t show_comp_vector(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->comp_vector);
}

static ssize_t show_tl_retry_count(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->tl_retry_count);
}

static ssize_t show_cmd_sg_entries(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%u\n", target->cmd_sg_cnt);
}

static ssize_t show_allow_ext_sg(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
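/*
 * Read-only attributes exported for each SRP SCSI host; they appear
 * below /sys/class/scsi_host/host<n>/.
 */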
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.slave_configure		= srp_slave_configure,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.change_queue_depth		= srp_change_queue_depth,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.skip_settle_delay		= true,
	.sg_tablesize			= SRP_DEF_SG_TABLESIZE,
	.can_queue			= SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs,
	.use_blk_tags			= 1,
	.track_queue_depth		= 1,
};
static int srp_sdev_count(struct Scsi_Host *host)
{
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
}
/*
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
 *   removal has been scheduled.
 * 0 and target->state != SRP_TARGET_REMOVED upon success.
 */
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	target->state = SRP_TARGET_SCANNING;
	sprintf(target->target_name, "SRP.T10:%016llX",
		be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	rport->lld_data = target;
	target->rport = rport;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	if (srp_connected_ch(target) < target->ch_count ||
	    target->qp_in_error) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}

	pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
		 dev_name(&target->scsi_host->shost_gendev),
		 srp_sdev_count(target->scsi_host));

	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_SCANNING)
		target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
}
static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.dev_release = srp_release_dev
};
/**
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host:   SRP host.
 * @target: SRP target port.
 */
static bool srp_conn_unique(struct srp_host *host,
			    struct srp_target_port *target)
{
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;

	spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
}
/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
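/*
 * For example (illustrative identifiers only; substitute the values
 * reported by the target and the actual host device name, which follows
 * the "srp-<device>-<port>" format used by srp_add_port() below):
 *
 *     echo id_ext=200100e08b000000,ioc_guid=0002c903000e8acc,\
 *          dgid=fe800000000000000002c903000e8acd,pkey=ffff,\
 *          service_id=0002c903000e8acc \
 *          > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */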
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_CMD_SG_ENTRIES	= 1 << 9,
	SRP_OPT_ALLOW_EXT_SG	= 1 << 10,
	SRP_OPT_SG_TABLESIZE	= 1 << 11,
	SRP_OPT_COMP_VECTOR	= 1 << 12,
	SRP_OPT_TL_RETRY_COUNT	= 1 << 13,
	SRP_OPT_QUEUE_SIZE	= 1 << 14,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_CMD_SG_ENTRIES,	"cmd_sg_entries=%u"	},
	{ SRP_OPT_ALLOW_EXT_SG,		"allow_ext_sg=%u"	},
	{ SRP_OPT_SG_TABLESIZE,		"sg_tablesize=%u"	},
	{ SRP_OPT_COMP_VECTOR,		"comp_vector=%u"	},
	{ SRP_OPT_TL_RETRY_COUNT,	"tl_retry_count=%u"	},
	{ SRP_OPT_QUEUE_SIZE,		"queue_size=%d"		},
	{ SRP_OPT_ERR,			NULL			}
};
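/*
 * Parse a comma-separated add_target parameter string into @target.
 * Returns 0 only if all mandatory options (id_ext, ioc_guid, dgid, pkey
 * and service_id) were present and valid, and a negative errno otherwise.
 */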
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;

		case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;

		case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
			    token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;

		case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;

		case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;

		default:
			pr_warn("unknown parameter or missing value '%s' in target creation request\n",
				p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				pr_warn("target creation request is missing parameter '%s'\n",
					srp_opt_tokens[i].pattern);

	if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
	    && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
		pr_warn("cmd_per_lun = %d > queue_size = %d\n",
			target->scsi_host->cmd_per_lun,
			target->scsi_host->can_queue);

out:
	kfree(options);
	return ret;
}
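/*
 * sysfs 'store' handler for the add_target attribute: parse the login
 * parameters, allocate a SCSI host and one or more RDMA channels spread
 * over NUMA nodes and completion vectors, connect the channels and scan
 * the new target port.
 */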
static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	struct srp_rdma_ch *ch;
	struct srp_device *srp_dev = host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	int ret, node_idx, node, cpu, i;
	bool multich = false;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_channel = 0;
	target_host->max_id      = 1;
	target_host->max_lun     = -1LL;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class	= SRP_REV16A_IB_IO_CLASS;
	target->scsi_host	= target_host;
	target->srp_host	= host;
	target->lkey		= host->srp_dev->pd->local_dma_lkey;
	target->global_mr	= host->srp_dev->global_mr;
	target->cmd_sg_cnt	= cmd_sg_entries;
	target->sg_tablesize	= indirect_sg_entries ? : cmd_sg_entries;
	target->allow_ext_sg	= allow_ext_sg;
	target->tl_retry_count	= 7;
	target->queue_size	= SRP_DEFAULT_QUEUE_SIZE;

	/*
	 * Avoid that the SCSI host can be removed by srp_remove_target()
	 * before this function returns.
	 */
	scsi_host_get(target->scsi_host);

	mutex_lock(&host->add_target_mutex);

	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;

	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
	if (ret)
		goto out;

	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;

	if (!srp_conn_unique(target->srp_host, target)) {
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}

	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
	    target->cmd_sg_cnt < target->sg_tablesize) {
		pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
		target->sg_tablesize = target->cmd_sg_cnt;
	}

	target_host->sg_tablesize = target->sg_tablesize;
	target->indirect_size = target->sg_tablesize *
				sizeof (struct srp_direct_buf);
	target->max_iu_len = sizeof (struct srp_cmd) +
			     sizeof (struct srp_indirect_buf) +
			     target->cmd_sg_cnt * sizeof (struct srp_direct_buf);

	INIT_WORK(&target->tl_err_work, srp_tl_err_work);
	INIT_WORK(&target->remove_work, srp_remove_work);
	spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
	if (ret)
		goto out;

	ret = -ENOMEM;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(ch_count ? :
				     min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;

	node_idx = 0;
	for_each_online_node(node) {
		const int ch_start = (node_idx * target->ch_count /
				      num_online_nodes());
		const int ch_end = ((node_idx + 1) * target->ch_count /
				    num_online_nodes());
		const int cv_start = (node_idx * ibdev->num_comp_vectors /
				      num_online_nodes() + target->comp_vector)
				     % ibdev->num_comp_vectors;
		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
				    num_online_nodes() + target->comp_vector)
				    % ibdev->num_comp_vectors;
		int cpu_idx = 0;

		for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				continue;
			ch = &target->ch[ch_start + cpu_idx];
			ch->target = target;
			ch->comp_vector = cv_start == cv_end ? cv_start :
				cv_start + cpu_idx % (cv_end - cv_start);
			spin_lock_init(&ch->lock);
			INIT_LIST_HEAD(&ch->free_tx);
			ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;
			ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;
			ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;
			ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d failed\n",
					     ch_start + cpu_idx,
					     target->ch_count);
				if (node_idx == 0 && cpu_idx == 0) {
					goto err_disconnect;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					goto connected;
				}
			}
			multich = true;
			cpu_idx++;
		}
		node_idx++;
	}

connected:
	target->scsi_host->nr_hw_queues = target->ch_count;

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	if (target->state != SRP_TARGET_REMOVED) {
		shost_printk(KERN_DEBUG, target->scsi_host, PFX
			     "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
			     be64_to_cpu(target->id_ext),
			     be64_to_cpu(target->ioc_guid),
			     be16_to_cpu(target->pkey),
			     be64_to_cpu(target->service_id),
			     target->sgid.raw, target->orig_dgid.raw);
	}

	ret = count;

out:
	mutex_unlock(&host->add_target_mutex);
	scsi_host_put(target->scsi_host);
	if (ret < 0)
		scsi_host_put(target->scsi_host);

	return ret;

err_disconnect:
	srp_disconnect_target(target);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
		srp_free_req_data(target, ch);
	}

	kfree(target->ch);
	goto out;
}
static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	mutex_init(&host->add_target_mutex);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}
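/*
 * IB client callback, invoked once per RDMA device. Queries the device
 * capabilities, chooses between FMR and fast registration for memory
 * registration, sizes the MR pages, and registers one srp_host per port.
 */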
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct srp_host *host;
	int mr_page_shift, p;
	u64 max_pages_per_mr;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		pr_warn("Query device failed for %s\n", device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
			    device->map_phys_fmr && device->unmap_fmr);
	srp_dev->has_fr = (dev_attr->device_cap_flags &
			   IB_DEVICE_MEM_MGT_EXTENSIONS);
	if (!srp_dev->has_fmr && !srp_dev->has_fr)
		dev_warn(&device->dev, "neither FMR nor FR is supported\n");

	srp_dev->use_fast_reg = (srp_dev->has_fr &&
				 (!srp_dev->has_fmr || prefer_fr));
	srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 4096 bytes. We're unlikely to build large sglists
	 * out of smaller entries.
	 */
	mr_page_shift		= max(12, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->mr_page_size	= 1 << mr_page_shift;
	srp_dev->mr_page_mask	= ~((u64) srp_dev->mr_page_size - 1);
	max_pages_per_mr	= dev_attr->max_mr_size;
	do_div(max_pages_per_mr, srp_dev->mr_page_size);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
	if (srp_dev->use_fast_reg) {
		srp_dev->max_pages_per_mr =
			min_t(u32, srp_dev->max_pages_per_mr,
			      dev_attr->max_fast_reg_page_list_len);
	}
	srp_dev->mr_max_size	= srp_dev->mr_page_size *
				  srp_dev->max_pages_per_mr;
	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
		 device->name, mr_page_shift, dev_attr->max_mr_size,
		 dev_attr->max_fast_reg_page_list_len,
		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
		srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
						   IB_ACCESS_LOCAL_WRITE |
						   IB_ACCESS_REMOTE_READ |
						   IB_ACCESS_REMOTE_WRITE);
		if (IS_ERR(srp_dev->global_mr))
			goto err_pd;
	} else {
		srp_dev->global_mr = NULL;
	}

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
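/*
 * IB client callback, invoked when an RDMA device goes away: tear down
 * every srp_host on the device and wait until all target removal work
 * has completed before freeing the device resources.
 */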
static void srp_remove_one(struct ib_device *device, void *client_data)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	struct srp_target_port *target;

	srp_dev = client_data;
	if (!srp_dev)
		return;

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Remove all target ports.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list)
			srp_queue_remove_work(target);
		spin_unlock(&host->target_lock);

		/*
		 * Wait for tl_err and target port removal tasks.
		 */
		flush_workqueue(system_long_wq);
		flush_workqueue(srp_remove_wq);

		kfree(host);
	}

	if (srp_dev->global_mr)
		ib_dereg_mr(srp_dev->global_mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
static struct srp_function_template ib_srp_transport_functions = {
	.has_rport_state	 = true,
	.reset_timer_if_blocked	 = true,
	.reconnect_delay	 = &srp_reconnect_delay,
	.fast_io_fail_tmo	 = &srp_fast_io_fail_tmo,
	.dev_loss_tmo		 = &srp_dev_loss_tmo,
	.reconnect		 = srp_rport_reconnect,
	.rport_delete		 = srp_rport_delete,
	.terminate_rport_io	 = srp_terminate_io,
};
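/*
 * Module initialization: validate the scatter/gather module parameters,
 * then register the SRP transport class, the SA client and the IB client.
 * For example (illustrative values only), loading the module with
 *
 *     modprobe ib_srp cmd_sg_entries=64 indirect_sg_entries=128
 *
 * overrides the default S/G sizing validated below.
 */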
static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize) {
		pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
		if (!cmd_sg_entries)
			cmd_sg_entries = srp_sg_tablesize;
	}

	if (!cmd_sg_entries)
		cmd_sg_entries = SRP_DEF_SG_TABLESIZE;

	if (cmd_sg_entries > 255) {
		pr_warn("Clamping cmd_sg_entries to 255\n");
		cmd_sg_entries = 255;
	}

	if (!indirect_sg_entries)
		indirect_sg_entries = cmd_sg_entries;
	else if (indirect_sg_entries < cmd_sg_entries) {
		pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
			cmd_sg_entries);
		indirect_sg_entries = cmd_sg_entries;
	}

	srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = -ENOMEM;
	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		goto destroy_wq;

	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

out:
	return ret;

unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);
	goto out;
}
static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
	destroy_workqueue(srp_remove_wq);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);