IB/srp: Register the indirect data buffer descriptor
drivers/infiniband/ulp/srp/ib_srp.c (firefly-linux-kernel-4.4.55.git)
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME        "ib_srp"
#define PFX             DRV_NAME ": "
#define DRV_VERSION     "2.0"
#define DRV_RELDATE     "July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr;
static bool register_always;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
                 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
                 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
                  "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
                 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
"Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
                 "Use memory registration even for contiguous memory regions");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
                 "Number of seconds between the observation of a transport"
                 " layer error and failing all I/O. \"off\" means that this"
                 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
                S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
                 "Maximum number of seconds that the SRP transport should"
                 " insulate transport layer errors. After this time has been"
                 " exceeded the SCSI host is removed. Should be"
                 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
                 " if fast_io_fail_tmo has not been set. \"off\" means that"
                 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
                 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
        .name   = "srp",
        .add    = srp_add_one,
        .remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
        int tmo = *(int *)kp->arg;

        if (tmo >= 0)
                return sprintf(buffer, "%d", tmo);
        else
                return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
        int tmo, res;

        res = srp_parse_tmo(&tmo, val);
        if (res)
                goto out;

        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
        else if (kp->arg == &srp_fast_io_fail_tmo)
                res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
        else
                res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
                                    tmo);
        if (res)
                goto out;
        *(int *)kp->arg = tmo;

out:
        return res;
}

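/*
 * These timeouts are normally tuned at run time through sysfs, e.g.
 * (assuming the module is loaded under its default ib_srp name):
 *
 *   echo 20  > /sys/module/ib_srp/parameters/reconnect_delay
 *   echo off > /sys/module/ib_srp/parameters/fast_io_fail_tmo
 *
 * srp_tmo_get() reports a negative timeout as "off", i.e. disabled, and
 * srp_tmo_set() cross-checks every new value against the other two
 * timeouts via srp_tmo_valid().
 */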
static const struct kernel_param_ops srp_tmo_ops = {
        .get = srp_tmo_get,
        .set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
        return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
        return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
        static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
        static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

        return topspin_workarounds &&
                (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
                 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size      = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
        if (!iu)
                return;

        ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
                            iu->direction);
        kfree(iu->buf);
        kfree(iu);
}
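
/*
 * Note on the IU life cycle above: every information unit pairs a
 * kzalloc'ed buffer with a DMA mapping, and srp_free_iu() must unmap with
 * exactly the size and direction used at allocation time, which is why
 * both values are stored in struct srp_iu.
 */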

static void srp_qp_event(struct ib_event *event, void *context)
{
        pr_debug("QP event %s (%d)\n",
                 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
                       struct ib_qp *qp)
{
        struct ib_qp_attr *attr;
        int ret;

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
                                  target->srp_host->port,
                                  be16_to_cpu(target->pkey),
                                  &attr->pkey_index);
        if (ret)
                goto out;

        attr->qp_state        = IB_QPS_INIT;
        attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
                                    IB_ACCESS_REMOTE_WRITE);
        attr->port_num        = target->srp_host->port;

        ret = ib_modify_qp(qp, attr,
                           IB_QP_STATE          |
                           IB_QP_PKEY_INDEX     |
                           IB_QP_ACCESS_FLAGS   |
                           IB_QP_PORT);

out:
        kfree(attr);
        return ret;
}
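
/*
 * srp_init_qp() only performs the RESET -> INIT transition (pkey index,
 * access flags and port number). The remaining INIT -> RTR -> RTS
 * transitions happen later, driven by the IB CM as the connection started
 * by srp_send_req() progresses (handled in srp_cm_handler(), not shown in
 * this excerpt).
 */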

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct ib_cm_id *new_cm_id;

        new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
                                    srp_cm_handler, ch);
        if (IS_ERR(new_cm_id))
                return PTR_ERR(new_cm_id);

        if (ch->cm_id)
                ib_destroy_cm_id(ch->cm_id);
        ch->cm_id = new_cm_id;
        ch->path.sgid = target->sgid;
        ch->path.dgid = target->orig_dgid;
        ch->path.pkey = target->pkey;
        ch->path.service_id = target->service_id;

        return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_fmr_pool_param fmr_param;

        memset(&fmr_param, 0, sizeof(fmr_param));
        fmr_param.pool_size         = target->scsi_host->can_queue;
        fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
        fmr_param.cache             = 1;
        fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
        fmr_param.page_shift        = ilog2(dev->mr_page_size);
        fmr_param.access            = (IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_WRITE |
                                       IB_ACCESS_REMOTE_READ);

        return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
        int i;
        struct srp_fr_desc *d;

        if (!pool)
                return;

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                if (d->frpl)
                        ib_free_fast_reg_page_list(d->frpl);
                if (d->mr)
                        ib_dereg_mr(d->mr);
        }
        kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
                                              struct ib_pd *pd, int pool_size,
                                              int max_page_list_len)
{
        struct srp_fr_pool *pool;
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        struct ib_fast_reg_page_list *frpl;
        int i, ret = -EINVAL;

        if (pool_size <= 0)
                goto err;
        ret = -ENOMEM;
        pool = kzalloc(sizeof(struct srp_fr_pool) +
                       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
        if (!pool)
                goto err;
        pool->size = pool_size;
        pool->max_page_list_len = max_page_list_len;
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);

        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
                mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
                                 max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        goto destroy_pool;
                }
                d->mr = mr;
                frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
                if (IS_ERR(frpl)) {
                        ret = PTR_ERR(frpl);
                        goto destroy_pool;
                }
                d->frpl = frpl;
                list_add_tail(&d->entry, &pool->free_list);
        }

out:
        return pool;

destroy_pool:
        srp_destroy_fr_pool(pool);

err:
        pool = ERR_PTR(ret);
        goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
        struct srp_fr_desc *d = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (!list_empty(&pool->free_list)) {
                d = list_first_entry(&pool->free_list, typeof(*d), entry);
                list_del(&d->entry);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
                            int n)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&pool->lock, flags);
        for (i = 0; i < n; i++)
                list_add(&desc[i]->entry, &pool->free_list);
        spin_unlock_irqrestore(&pool->lock, flags);
}
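
/*
 * srp_fr_pool_get() and srp_fr_pool_put() implement a simple free-list
 * allocator: descriptors are handed out and returned in O(1) under
 * pool->lock. The irqsave variants are used because the put path can run
 * from completion (interrupt) context via srp_unmap_data().
 */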

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
        struct srp_device *dev = target->srp_host->srp_dev;

        return srp_create_fr_pool(dev->dev, dev->pd,
                                  target->scsi_host->can_queue,
                                  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Change a queue pair into the error state and wait until all receive
 * completions have been processed before destroying it. This prevents the
 * receive completion handler from accessing the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
        static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
        struct ib_recv_wr *bad_wr;
        int ret;

        /* Destroying a QP and reusing ch->done is only safe if not connected */
        WARN_ON_ONCE(ch->connected);

        ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
        WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
        if (ret)
                goto out;

        init_completion(&ch->done);
        ret = ib_post_recv(ch->qp, &wr, &bad_wr);
        WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
        if (ret == 0)
                wait_for_completion(&ch->done);

out:
        ib_destroy_qp(ch->qp);
}
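
/*
 * The drain sequence above works in three steps: move the QP to the error
 * state so the HCA flushes every posted receive, post one final receive
 * tagged SRP_LAST_WR_ID, and wait on ch->done, which the receive
 * completion path is expected to complete when it sees that marker. Only
 * after that is ib_destroy_qp() safe.
 */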

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_qp_init_attr *init_attr;
        struct ib_cq *recv_cq, *send_cq;
        struct ib_qp *qp;
        struct ib_fmr_pool *fmr_pool = NULL;
        struct srp_fr_pool *fr_pool = NULL;
        const int m = 1 + dev->use_fast_reg;
        struct ib_cq_init_attr cq_attr = {};
        int ret;

        init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
        if (!init_attr)
                return -ENOMEM;

        /* + 1 for SRP_LAST_WR_ID */
        cq_attr.cqe = target->queue_size + 1;
        cq_attr.comp_vector = ch->comp_vector;
        recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
                               &cq_attr);
        if (IS_ERR(recv_cq)) {
                ret = PTR_ERR(recv_cq);
                goto err;
        }

        cq_attr.cqe = m * target->queue_size;
        cq_attr.comp_vector = ch->comp_vector;
        send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
                               &cq_attr);
        if (IS_ERR(send_cq)) {
                ret = PTR_ERR(send_cq);
                goto err_recv_cq;
        }

        ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);

        init_attr->event_handler       = srp_qp_event;
        init_attr->cap.max_send_wr     = m * target->queue_size;
        init_attr->cap.max_recv_wr     = target->queue_size + 1;
        init_attr->cap.max_recv_sge    = 1;
        init_attr->cap.max_send_sge    = 1;
        init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
        init_attr->qp_type             = IB_QPT_RC;
        init_attr->send_cq             = send_cq;
        init_attr->recv_cq             = recv_cq;

        qp = ib_create_qp(dev->pd, init_attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_send_cq;
        }

        ret = srp_init_qp(target, qp);
        if (ret)
                goto err_qp;

        if (dev->use_fast_reg) {
                fr_pool = srp_alloc_fr_pool(target);
                if (IS_ERR(fr_pool)) {
                        ret = PTR_ERR(fr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
                ch->fr_pool = fr_pool;
        } else if (dev->use_fmr) {
                fmr_pool = srp_alloc_fmr_pool(target);
                if (IS_ERR(fmr_pool)) {
                        ret = PTR_ERR(fmr_pool);
                        shost_printk(KERN_WARNING, target->scsi_host, PFX
                                     "FMR pool allocation failed (%d)\n", ret);
                        goto err_qp;
                }
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
                ch->fmr_pool = fmr_pool;
        }

        if (ch->qp)
                srp_destroy_qp(ch);
        if (ch->recv_cq)
                ib_destroy_cq(ch->recv_cq);
        if (ch->send_cq)
                ib_destroy_cq(ch->send_cq);

        ch->qp = qp;
        ch->recv_cq = recv_cq;
        ch->send_cq = send_cq;

        kfree(init_attr);
        return 0;

err_qp:
        ib_destroy_qp(qp);

err_send_cq:
        ib_destroy_cq(send_cq);

err_recv_cq:
        ib_destroy_cq(recv_cq);

err:
        kfree(init_attr);
        return ret;
}
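
/*
 * Note that srp_create_ch_ib() builds the complete replacement first (CQs,
 * QP and registration pool) and only tears down and swaps out the old
 * channel resources once every allocation has succeeded, so a failed
 * reconnect leaves the previous QP and CQs in place.
 */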

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
                           struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        int i;

        if (!ch->target)
                return;

        if (ch->cm_id) {
                ib_destroy_cm_id(ch->cm_id);
                ch->cm_id = NULL;
        }

        /* Return if srp_new_cm_id() succeeded but srp_create_ch_ib() did not. */
        if (!ch->qp)
                return;

        if (dev->use_fast_reg) {
                if (ch->fr_pool)
                        srp_destroy_fr_pool(ch->fr_pool);
        } else if (dev->use_fmr) {
                if (ch->fmr_pool)
                        ib_destroy_fmr_pool(ch->fmr_pool);
        }
        srp_destroy_qp(ch);
        ib_destroy_cq(ch->send_cq);
        ib_destroy_cq(ch->recv_cq);

        /*
         * Prevent the SCSI error handler from using this channel after it
         * has been freed: the SCSI error handler can continue attempting
         * recovery actions after scsi_remove_host() has returned.
         */
        ch->target = NULL;

        ch->qp = NULL;
        ch->send_cq = ch->recv_cq = NULL;

        if (ch->rx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->rx_ring[i]);
                kfree(ch->rx_ring);
                ch->rx_ring = NULL;
        }
        if (ch->tx_ring) {
                for (i = 0; i < target->queue_size; ++i)
                        srp_free_iu(target->srp_host, ch->tx_ring[i]);
                kfree(ch->tx_ring);
                ch->tx_ring = NULL;
        }
}

static void srp_path_rec_completion(int status,
                                    struct ib_sa_path_rec *pathrec,
                                    void *ch_ptr)
{
        struct srp_rdma_ch *ch = ch_ptr;
        struct srp_target_port *target = ch->target;

        ch->status = status;
        if (status)
                shost_printk(KERN_ERR, target->scsi_host,
                             PFX "Got failed path rec status %d\n", status);
        else
                ch->path = *pathrec;
        complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        int ret;

        ch->path.numb_path = 1;

        init_completion(&ch->done);

        ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
                                               target->srp_host->srp_dev->dev,
                                               target->srp_host->port,
                                               &ch->path,
                                               IB_SA_PATH_REC_SERVICE_ID |
                                               IB_SA_PATH_REC_DGID       |
                                               IB_SA_PATH_REC_SGID       |
                                               IB_SA_PATH_REC_NUMB_PATH  |
                                               IB_SA_PATH_REC_PKEY,
                                               SRP_PATH_REC_TIMEOUT_MS,
                                               GFP_KERNEL,
                                               srp_path_rec_completion,
                                               ch, &ch->path_query);
        if (ch->path_query_id < 0)
                return ch->path_query_id;

        ret = wait_for_completion_interruptible(&ch->done);
        if (ret < 0)
                return ret;

        if (ch->status < 0)
                shost_printk(KERN_WARNING, target->scsi_host,
                             PFX "Path record query failed\n");

        return ch->status;
}
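
/*
 * srp_lookup_path() follows the usual ib_sa pattern: the key path fields
 * (sgid, dgid, pkey and service_id, filled in by srp_new_cm_id()) select
 * the record, the query runs asynchronously, and the caller blocks on
 * ch->done until srp_path_rec_completion() stores the result in ch->path
 * and ch->status.
 */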

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        struct {
                struct ib_cm_req_param param;
                struct srp_login_req   priv;
        } *req = NULL;
        int status;

        req = kzalloc(sizeof *req, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->param.primary_path               = &ch->path;
        req->param.alternate_path             = NULL;
        req->param.service_id                 = target->service_id;
        req->param.qp_num                     = ch->qp->qp_num;
        req->param.qp_type                    = ch->qp->qp_type;
        req->param.private_data               = &req->priv;
        req->param.private_data_len           = sizeof req->priv;
        req->param.flow_control               = 1;

        get_random_bytes(&req->param.starting_psn, 4);
        req->param.starting_psn              &= 0xffffff;

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req->param.responder_resources        = 4;
        req->param.remote_cm_response_timeout = 20;
        req->param.local_cm_response_timeout  = 20;
        req->param.retry_count                = target->tl_retry_count;
        req->param.rnr_retry_count            = 7;
        req->param.max_cm_retries             = 15;

        req->priv.opcode        = SRP_LOGIN_REQ;
        req->priv.tag           = 0;
        req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
        req->priv.req_buf_fmt   = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
                                              SRP_BUF_FORMAT_INDIRECT);
        req->priv.req_flags     = (multich ? SRP_MULTICHAN_MULTI :
                                   SRP_MULTICHAN_SINGLE);
        /*
         * In the published SRP specification (draft rev. 16a), the
         * port identifier format is 8 bytes of ID extension followed
         * by 8 bytes of GUID.  Older drafts put the two halves in the
         * opposite order, so that the GUID comes first.
         *
         * Targets conforming to these obsolete drafts can be
         * recognized by the I/O Class they report.
         */
        if (target->io_class == SRP_REV10_IB_IO_CLASS) {
                memcpy(req->priv.initiator_port_id,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->initiator_ext, 8);
                memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
                memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
        } else {
                memcpy(req->priv.initiator_port_id,
                       &target->initiator_ext, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->sgid.global.interface_id, 8);
                memcpy(req->priv.target_port_id,     &target->id_ext, 8);
                memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
        }

        /*
         * Topspin/Cisco SRP targets will reject our login unless we
         * zero out the first 8 bytes of our initiator port ID and set
         * the second 8 bytes to the local node GUID.
         */
        if (srp_target_is_topspin(target)) {
                shost_printk(KERN_DEBUG, target->scsi_host,
                             PFX "Topspin/Cisco initiator port ID workaround "
                             "activated for target GUID %016llx\n",
                             be64_to_cpu(target->ioc_guid));
                memset(req->priv.initiator_port_id, 0, 8);
                memcpy(req->priv.initiator_port_id + 8,
                       &target->srp_host->srp_dev->dev->node_guid, 8);
        }

        status = ib_send_cm_req(ch->cm_id, &req->param);

        kfree(req);

        return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
        bool changed = false;

        spin_lock_irq(&target->lock);
        if (target->state != SRP_TARGET_REMOVED) {
                target->state = SRP_TARGET_REMOVED;
                changed = true;
        }
        spin_unlock_irq(&target->lock);

        if (changed)
                queue_work(srp_remove_wq, &target->remove_work);

        return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        /* XXX should send SRP_I_LOGOUT request */

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ch->connected = false;
                if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
                        shost_printk(KERN_DEBUG, target->scsi_host,
                                     PFX "Sending CM DREQ failed\n");
                }
        }
}

static void srp_free_req_data(struct srp_target_port *target,
                              struct srp_rdma_ch *ch)
{
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        struct srp_request *req;
        int i;

        if (!ch->req_ring)
                return;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                if (dev->use_fast_reg)
                        kfree(req->fr_list);
                else
                        kfree(req->fmr_list);
                kfree(req->map_page);
                if (req->indirect_dma_addr) {
                        ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
                                            target->indirect_size,
                                            DMA_TO_DEVICE);
                }
                kfree(req->indirect_desc);
        }

        kfree(ch->req_ring);
        ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *srp_dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = srp_dev->dev;
        struct srp_request *req;
        void *mr_list;
        dma_addr_t dma_addr;
        int i, ret = -ENOMEM;

        ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
                               GFP_KERNEL);
        if (!ch->req_ring)
                goto out;

        for (i = 0; i < target->req_ring_size; ++i) {
                req = &ch->req_ring[i];
                mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
                                  GFP_KERNEL);
                if (!mr_list)
                        goto out;
                if (srp_dev->use_fast_reg)
                        req->fr_list = mr_list;
                else
                        req->fmr_list = mr_list;
                req->map_page = kmalloc(srp_dev->max_pages_per_mr *
                                        sizeof(void *), GFP_KERNEL);
                if (!req->map_page)
                        goto out;
                req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
                if (!req->indirect_desc)
                        goto out;

                dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
                                             target->indirect_size,
                                             DMA_TO_DEVICE);
                if (ib_dma_mapping_error(ibdev, dma_addr))
                        goto out;

                req->indirect_dma_addr = dma_addr;
        }
        ret = 0;

out:
        return ret;
}
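
/*
 * On allocation failure srp_alloc_req_data() returns with the ring
 * partially populated; cleanup is left to srp_free_req_data(), which copes
 * with that because the ring is kcalloc'ed (kfree(NULL) is a no-op) and the
 * req->indirect_dma_addr check skips requests that were never DMA-mapped.
 */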

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked are ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
        struct device_attribute **attr;

        for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
                device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
        struct srp_rdma_ch *ch;
        int i;

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_del_scsi_host_attr(target->scsi_host);
        srp_rport_get(target->rport);
        srp_remove_host(target->scsi_host);
        scsi_remove_host(target->scsi_host);
        srp_stop_rport_timers(target->rport);
        srp_disconnect_target(target);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_ch_ib(target, ch);
        }
        cancel_work_sync(&target->tl_err_work);
        srp_rport_put(target->rport);
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                srp_free_req_data(target, ch);
        }
        kfree(target->ch);
        target->ch = NULL;

        spin_lock(&target->srp_host->target_lock);
        list_del(&target->list);
        spin_unlock(&target->srp_host->target_lock);

        scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
        struct srp_target_port *target =
                container_of(work, struct srp_target_port, remove_work);

        WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

        srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;

        srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
        int i, c = 0;

        for (i = 0; i < target->ch_count; i++)
                c += target->ch[i].connected;

        return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
        struct srp_target_port *target = ch->target;
        int ret;

        WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

        ret = srp_lookup_path(ch);
        if (ret)
                return ret;

        while (1) {
                init_completion(&ch->done);
                ret = srp_send_req(ch, multich);
                if (ret)
                        return ret;
                ret = wait_for_completion_interruptible(&ch->done);
                if (ret < 0)
                        return ret;

                /*
                 * The CM event handling code will set status to
                 * SRP_PORT_REDIRECT if we get a port redirect REJ
                 * back, or SRP_DLID_REDIRECT if we get a lid/qp
                 * redirect REJ back.
                 */
                switch (ch->status) {
                case 0:
                        ch->connected = true;
                        return 0;

                case SRP_PORT_REDIRECT:
                        ret = srp_lookup_path(ch);
                        if (ret)
                                return ret;
                        break;

                case SRP_DLID_REDIRECT:
                        break;

                case SRP_STALE_CONN:
                        shost_printk(KERN_ERR, target->scsi_host, PFX
                                     "giving up on stale connection\n");
                        ch->status = -ECONNRESET;
                        return ch->status;

                default:
                        return ch->status;
                }
        }
}
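
/*
 * In other words, the login loop above retries after redirect REJs: a
 * SRP_PORT_REDIRECT triggers a fresh path record lookup before the next
 * SRP_LOGIN_REQ, a SRP_DLID_REDIRECT simply resends the request (the CM
 * handler is expected to have updated ch->path already), and every other
 * nonzero status aborts the connect attempt.
 */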

static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
{
        struct ib_send_wr *bad_wr;
        struct ib_send_wr wr = {
                .opcode             = IB_WR_LOCAL_INV,
                .wr_id              = LOCAL_INV_WR_ID_MASK,
                .next               = NULL,
                .num_sge            = 0,
                .send_flags         = 0,
                .ex.invalidate_rkey = rkey,
        };

        return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
                           struct srp_rdma_ch *ch,
                           struct srp_request *req)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        int i, res;

        if (!scsi_sglist(scmnd) ||
            (scmnd->sc_data_direction != DMA_TO_DEVICE &&
             scmnd->sc_data_direction != DMA_FROM_DEVICE))
                return;

        if (dev->use_fast_reg) {
                struct srp_fr_desc **pfr;

                for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
                        res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
                        if (res < 0) {
                                shost_printk(KERN_ERR, target->scsi_host, PFX
                                  "Queueing INV WR for rkey %#x failed (%d)\n",
                                  (*pfr)->mr->rkey, res);
                                queue_work(system_long_wq,
                                           &target->tl_err_work);
                        }
                }
                if (req->nmdesc)
                        srp_fr_pool_put(ch->fr_pool, req->fr_list,
                                        req->nmdesc);
        } else if (dev->use_fmr) {
                struct ib_pool_fmr **pfmr;

                for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
                        ib_fmr_pool_unmap(*pfmr);
        }

        ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
                        scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
                                       struct srp_request *req,
                                       struct scsi_device *sdev,
                                       struct scsi_cmnd *scmnd)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        if (req->scmnd &&
            (!sdev || req->scmnd->device == sdev) &&
            (!scmnd || req->scmnd == scmnd)) {
                scmnd = req->scmnd;
                req->scmnd = NULL;
        } else {
                scmnd = NULL;
        }
        spin_unlock_irqrestore(&ch->lock, flags);

        return scmnd;
}

/**
 * srp_free_req() - Unmap data and add request to the free request list.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
                         struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
        unsigned long flags;

        srp_unmap_data(scmnd, ch, req);

        spin_lock_irqsave(&ch->lock, flags);
        ch->req_lim += req_lim_delta;
        spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
                           struct scsi_device *sdev, int result)
{
        struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

        if (scmnd) {
                srp_free_req(ch, req, scmnd, 0);
                scmnd->result = result;
                scmnd->scsi_done(scmnd);
        }
}

static void srp_terminate_io(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        struct Scsi_Host *shost = target->scsi_host;
        struct scsi_device *sdev;
        int i, j;

        /*
         * Invoking srp_terminate_io() while srp_queuecommand() is running
         * is not safe. Hence the warning statement below.
         */
        shost_for_each_device(sdev, shost)
                WARN_ON_ONCE(sdev->request_queue->request_fn_active);

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];

                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL,
                                       DID_TRANSPORT_FAILFAST << 16);
                }
        }
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
        struct srp_target_port *target = rport->lld_data;
        struct srp_rdma_ch *ch;
        int i, j, ret = 0;
        bool multich = false;

        srp_disconnect_target(target);

        if (target->state == SRP_TARGET_SCANNING)
                return -ENODEV;

        /*
         * Now get a new local CM ID so that we avoid confusing the target in
         * case things are really fouled up. Doing so also ensures that all CM
         * callbacks will have finished before a new QP is allocated.
         */
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                ret += srp_new_cm_id(ch);
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                for (j = 0; j < target->req_ring_size; ++j) {
                        struct srp_request *req = &ch->req_ring[j];

                        srp_finish_req(ch, req, NULL, DID_RESET << 16);
                }
        }
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                /*
                 * Whether or not creating a new CM ID succeeded, create a new
                 * QP. This guarantees that all completion callback function
                 * invocations have finished before request resetting starts.
                 */
                ret += srp_create_ch_ib(ch);

                INIT_LIST_HEAD(&ch->free_tx);
                for (j = 0; j < target->queue_size; ++j)
                        list_add(&ch->tx_ring[j]->list, &ch->free_tx);
        }

        target->qp_in_error = false;

        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
                if (ret)
                        break;
                ret = srp_connect_ch(ch, multich);
                multich = true;
        }

        if (ret == 0)
                shost_printk(KERN_INFO, target->scsi_host,
                             PFX "reconnect succeeded\n");

        return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
                         unsigned int dma_len, u32 rkey)
{
        struct srp_direct_buf *desc = state->desc;

        WARN_ON_ONCE(!dma_len);

        desc->va = cpu_to_be64(dma_addr);
        desc->key = cpu_to_be32(rkey);
        desc->len = cpu_to_be32(dma_len);

        state->total_len += dma_len;
        state->desc++;
        state->ndesc++;
}
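
/*
 * Each descriptor written by srp_map_desc() is the 16-byte srp_direct_buf
 * wire format from <scsi/srp.h>: a 64-bit virtual address, a 32-bit R_Key
 * and a 32-bit length, all in big-endian byte order, hence the
 * cpu_to_be*() conversions.
 */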

static int srp_map_finish_fmr(struct srp_map_state *state,
                              struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_pool_fmr *fmr;
        u64 io_addr = 0;

        if (state->fmr.next >= state->fmr.end)
                return -ENOMEM;

        fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
                                   state->npages, io_addr);
        if (IS_ERR(fmr))
                return PTR_ERR(fmr);

        *state->fmr.next++ = fmr;
        state->nmdesc++;

        srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
                     state->dma_len, fmr->fmr->rkey);

        return 0;
}

static int srp_map_finish_fr(struct srp_map_state *state,
                             struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_send_wr *bad_wr;
        struct ib_send_wr wr;
        struct srp_fr_desc *desc;
        u32 rkey;

        if (state->fr.next >= state->fr.end)
                return -ENOMEM;

        desc = srp_fr_pool_get(ch->fr_pool);
        if (!desc)
                return -ENOMEM;

        rkey = ib_inc_rkey(desc->mr->rkey);
        ib_update_fast_reg_key(desc->mr, rkey);

        memcpy(desc->frpl->page_list, state->pages,
               sizeof(state->pages[0]) * state->npages);

        memset(&wr, 0, sizeof(wr));
        wr.opcode = IB_WR_FAST_REG_MR;
        wr.wr_id = FAST_REG_WR_ID_MASK;
        wr.wr.fast_reg.iova_start = state->base_dma_addr;
        wr.wr.fast_reg.page_list = desc->frpl;
        wr.wr.fast_reg.page_list_len = state->npages;
        wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
        wr.wr.fast_reg.length = state->dma_len;
        wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
                                       IB_ACCESS_REMOTE_READ |
                                       IB_ACCESS_REMOTE_WRITE);
        wr.wr.fast_reg.rkey = desc->mr->lkey;

        *state->fr.next++ = desc;
        state->nmdesc++;

        srp_map_desc(state, state->base_dma_addr, state->dma_len,
                     desc->mr->rkey);

        return ib_post_send(ch->qp, &wr, &bad_wr);
}
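
/*
 * The two registration paths above differ in when the rkey becomes valid:
 * ib_fmr_pool_map_phys() is a synchronous verbs call, whereas fast
 * registration posts an IB_WR_FAST_REG_MR work request and relies on the
 * in-order execution of the RC send queue so that the registration takes
 * effect on the HCA before the SRP command that references the new rkey
 * is sent.
 */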

static int srp_finish_mapping(struct srp_map_state *state,
                              struct srp_rdma_ch *ch)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        int ret = 0;

        WARN_ON_ONCE(!dev->use_fast_reg && !dev->use_fmr);

        if (state->npages == 0)
                return 0;

        if (state->npages == 1 && !register_always)
                srp_map_desc(state, state->base_dma_addr, state->dma_len,
                             target->rkey);
        else
                ret = dev->use_fast_reg ? srp_map_finish_fr(state, ch) :
                        srp_map_finish_fmr(state, ch);

        if (ret == 0) {
                state->npages = 0;
                state->dma_len = 0;
        }

        return ret;
}

static int srp_map_sg_entry(struct srp_map_state *state,
                            struct srp_rdma_ch *ch,
                            struct scatterlist *sg, int sg_index)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct ib_device *ibdev = dev->dev;
        dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
        unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
        unsigned int len = 0;
        int ret;

        WARN_ON_ONCE(!dma_len);

        while (dma_len) {
                unsigned offset = dma_addr & ~dev->mr_page_mask;
                if (state->npages == dev->max_pages_per_mr || offset != 0) {
                        ret = srp_finish_mapping(state, ch);
                        if (ret)
                                return ret;
                }

                len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

                if (!state->npages)
                        state->base_dma_addr = dma_addr;
                state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
                state->dma_len += len;
                dma_addr += len;
                dma_len -= len;
        }

        /*
         * If the last entry of the MR wasn't a full page, then we need to
         * close it out and start a new one -- we can only merge at page
         * boundaries.
         */
        ret = 0;
        if (len != dev->mr_page_size)
                ret = srp_finish_mapping(state, ch);
        return ret;
}

static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
                      struct srp_request *req, struct scatterlist *scat,
                      int count)
{
        struct srp_target_port *target = ch->target;
        struct srp_device *dev = target->srp_host->srp_dev;
        struct scatterlist *sg;
        int i, ret;

        state->desc     = req->indirect_desc;
        state->pages    = req->map_page;
        if (dev->use_fast_reg) {
                state->fr.next = req->fr_list;
                state->fr.end = req->fr_list + target->cmd_sg_cnt;
        } else if (dev->use_fmr) {
                state->fmr.next = req->fmr_list;
                state->fmr.end = req->fmr_list + target->cmd_sg_cnt;
        }

        if (dev->use_fast_reg || dev->use_fmr) {
                for_each_sg(scat, sg, count, i) {
                        ret = srp_map_sg_entry(state, ch, sg, i);
                        if (ret)
                                goto out;
                }
                ret = srp_finish_mapping(state, ch);
                if (ret)
                        goto out;
        } else {
                for_each_sg(scat, sg, count, i) {
                        srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
                                     ib_sg_dma_len(dev->dev, sg), target->rkey);
                }
        }

        req->nmdesc = state->nmdesc;
        ret = 0;

out:
        return ret;
}

/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor is allocated with
 * kmalloc(), it is guaranteed to be physically contiguous.
1462  */
1463 static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
1464                        void **next_mr, void **end_mr, u32 idb_len,
1465                        __be32 *idb_rkey)
1466 {
1467         struct srp_target_port *target = ch->target;
1468         struct srp_device *dev = target->srp_host->srp_dev;
1469         struct srp_map_state state;
1470         struct srp_direct_buf idb_desc;
1471         u64 idb_pages[1];
1472         int ret;
1473
1474         memset(&state, 0, sizeof(state));
1475         memset(&idb_desc, 0, sizeof(idb_desc));
1476         state.gen.next = next_mr;
1477         state.gen.end = end_mr;
1478         state.desc = &idb_desc;
1479         state.pages = idb_pages;
1480         state.pages[0] = (req->indirect_dma_addr &
1481                           dev->mr_page_mask);
1482         state.npages = 1;
1483         state.base_dma_addr = req->indirect_dma_addr;
1484         state.dma_len = idb_len;
1485         ret = srp_finish_mapping(&state, ch);
1486         if (ret < 0)
1487                 goto out;
1488
1489         *idb_rkey = idb_desc.key;
1490
1491 out:
1492         return ret;
1493 }
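
/*
 * Illustrative example: for an I/O that srp_map_data() below maps onto
 * eight descriptors,
 *
 *	table_len = 8 * sizeof(struct srp_direct_buf);
 *	idb_len   = sizeof(struct srp_indirect_buf) + table_len;
 *
 * and, when register_always is enabled, srp_map_idb() registers the
 * kmalloc()ed indirect table so that the target reads it through idb_rkey
 * instead of through the global rkey.
 */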
1494
1495 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1496                         struct srp_request *req)
1497 {
1498         struct srp_target_port *target = ch->target;
1499         struct scatterlist *scat;
1500         struct srp_cmd *cmd = req->cmd->buf;
1501         int len, nents, count, ret;
1502         struct srp_device *dev;
1503         struct ib_device *ibdev;
1504         struct srp_map_state state;
1505         struct srp_indirect_buf *indirect_hdr;
1506         u32 idb_len, table_len;
1507         __be32 idb_rkey;
1508         u8 fmt;
1509
1510         if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1511                 return sizeof (struct srp_cmd);
1512
1513         if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1514             scmnd->sc_data_direction != DMA_TO_DEVICE) {
1515                 shost_printk(KERN_WARNING, target->scsi_host,
1516                              PFX "Unhandled data direction %d\n",
1517                              scmnd->sc_data_direction);
1518                 return -EINVAL;
1519         }
1520
1521         nents = scsi_sg_count(scmnd);
1522         scat  = scsi_sglist(scmnd);
1523
1524         dev = target->srp_host->srp_dev;
1525         ibdev = dev->dev;
1526
1527         count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1528         if (unlikely(count == 0))
1529                 return -EIO;
1530
1531         fmt = SRP_DATA_DESC_DIRECT;
1532         len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1533
1534         if (count == 1 && !register_always) {
1535                 /*
1536                  * The midlayer only generated a single gather/scatter
1537                  * entry, or DMA mapping coalesced everything to a
1538                  * single entry.  So a direct descriptor along with
1539                  * the DMA MR suffices.
1540                  */
1541                 struct srp_direct_buf *buf = (void *) cmd->add_data;
1542
1543                 buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1544                 buf->key = cpu_to_be32(target->rkey);
1545                 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1546
1547                 req->nmdesc = 0;
1548                 goto map_complete;
1549         }
1550
1551         /*
1552          * We have more than one scatter/gather entry, so build our indirect
1553          * descriptor table, trying to merge as many entries as we can.
1554          */
1555         indirect_hdr = (void *) cmd->add_data;
1556
1557         ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1558                                    target->indirect_size, DMA_TO_DEVICE);
1559
1560         memset(&state, 0, sizeof(state));
1561         ret = srp_map_sg(&state, ch, req, scat, count);
             if (ret < 0)
                     return ret;
1562
1563         /* We've mapped the request, now pull as much of the indirect
1564          * descriptor table as we can into the command buffer. If this
1565          * target is not using an external indirect table, we are
1566          * guaranteed to fit into the command, as the SCSI layer won't
1567          * give us more S/G entries than we allow.
1568          */
1569         if (state.ndesc == 1) {
1570                 /*
1571                  * Memory registration collapsed the sg-list into one entry,
1572                  * so use a direct descriptor.
1573                  */
1574                 struct srp_direct_buf *buf = (void *) cmd->add_data;
1575
1576                 *buf = req->indirect_desc[0];
1577                 goto map_complete;
1578         }
1579
1580         if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1581                      !target->allow_ext_sg)) {
1582                 shost_printk(KERN_ERR, target->scsi_host,
1583                              "Could not fit S/G list into SRP_CMD\n");
1584                 return -EIO;
1585         }
1586
1587         count = min(state.ndesc, target->cmd_sg_cnt);
1588         table_len = state.ndesc * sizeof (struct srp_direct_buf);
1589         idb_len = sizeof(struct srp_indirect_buf) + table_len;
1590
1591         fmt = SRP_DATA_DESC_INDIRECT;
1592         len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1593         len += count * sizeof (struct srp_direct_buf);
1594
1595         memcpy(indirect_hdr->desc_list, req->indirect_desc,
1596                count * sizeof (struct srp_direct_buf));
1597
1598         if (register_always && (dev->use_fast_reg || dev->use_fmr)) {
1599                 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1600                                   idb_len, &idb_rkey);
1601                 if (ret < 0)
1602                         return ret;
1603                 req->nmdesc++;
1604         } else {
1605                 idb_rkey = target->rkey;
1606         }
1607
1608         indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1609         indirect_hdr->table_desc.key = idb_rkey;
1610         indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1611         indirect_hdr->len = cpu_to_be32(state.total_len);
1612
1613         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1614                 cmd->data_out_desc_cnt = count;
1615         else
1616                 cmd->data_in_desc_cnt = count;
1617
1618         ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1619                                       DMA_TO_DEVICE);
1620
1621 map_complete:
1622         if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1623                 cmd->buf_fmt = fmt << 4;
1624         else
1625                 cmd->buf_fmt = fmt;
1626
1627         return len;
1628 }
1629
1630 /*
1631  * Return an IU and possible credit to the free pool
1632  */
1633 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1634                           enum srp_iu_type iu_type)
1635 {
1636         unsigned long flags;
1637
1638         spin_lock_irqsave(&ch->lock, flags);
1639         list_add(&iu->list, &ch->free_tx);
1640         if (iu_type != SRP_IU_RSP)
1641                 ++ch->req_lim;
1642         spin_unlock_irqrestore(&ch->lock, flags);
1643 }
1644
1645 /*
1646  * Must be called with ch->lock held to protect req_lim and free_tx.
1647  * If IU is not sent, it must be returned using srp_put_tx_iu().
1648  *
1649  * Note:
1650  * An upper limit for the number of allocated information units for each
1651  * request type is:
1652  * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1653  *   more than Scsi_Host.can_queue requests.
1654  * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1655  * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1656  *   one unanswered SRP request to an initiator.
1657  */
1658 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1659                                       enum srp_iu_type iu_type)
1660 {
1661         struct srp_target_port *target = ch->target;
1662         s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1663         struct srp_iu *iu;
1664
1665         srp_send_completion(ch->send_cq, ch);
1666
1667         if (list_empty(&ch->free_tx))
1668                 return NULL;
1669
1670         /* Initiator responses to target requests do not consume credits */
1671         if (iu_type != SRP_IU_RSP) {
1672                 if (ch->req_lim <= rsv) {
1673                         ++target->zero_req_lim;
1674                         return NULL;
1675                 }
1676
1677                 --ch->req_lim;
1678         }
1679
1680         iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1681         list_del(&iu->list);
1682         return iu;
1683 }
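
/*
 * Typical caller pattern (cf. srp_queuecommand() and srp_response_common();
 * SRP_IU_CMD is just one example of an IU type):
 *
 *	spin_lock_irqsave(&ch->lock, flags);
 *	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
 *	spin_unlock_irqrestore(&ch->lock, flags);
 *	...
 *	if (srp_post_send(ch, iu, len))
 *		srp_put_tx_iu(ch, iu, SRP_IU_CMD);
 */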
1684
1685 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1686 {
1687         struct srp_target_port *target = ch->target;
1688         struct ib_sge list;
1689         struct ib_send_wr wr, *bad_wr;
1690
1691         list.addr   = iu->dma;
1692         list.length = len;
1693         list.lkey   = target->lkey;
1694
1695         wr.next       = NULL;
1696         wr.wr_id      = (uintptr_t) iu;
1697         wr.sg_list    = &list;
1698         wr.num_sge    = 1;
1699         wr.opcode     = IB_WR_SEND;
1700         wr.send_flags = IB_SEND_SIGNALED;
1701
1702         return ib_post_send(ch->qp, &wr, &bad_wr);
1703 }
1704
1705 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1706 {
1707         struct srp_target_port *target = ch->target;
1708         struct ib_recv_wr wr, *bad_wr;
1709         struct ib_sge list;
1710
1711         list.addr   = iu->dma;
1712         list.length = iu->size;
1713         list.lkey   = target->lkey;
1714
1715         wr.next     = NULL;
1716         wr.wr_id    = (uintptr_t) iu;
1717         wr.sg_list  = &list;
1718         wr.num_sge  = 1;
1719
1720         return ib_post_recv(ch->qp, &wr, &bad_wr);
1721 }
1722
1723 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1724 {
1725         struct srp_target_port *target = ch->target;
1726         struct srp_request *req;
1727         struct scsi_cmnd *scmnd;
1728         unsigned long flags;
1729
1730         if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1731                 spin_lock_irqsave(&ch->lock, flags);
1732                 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1733                 spin_unlock_irqrestore(&ch->lock, flags);
1734
1735                 ch->tsk_mgmt_status = -1;
1736                 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1737                         ch->tsk_mgmt_status = rsp->data[3];
1738                 complete(&ch->tsk_mgmt_done);
1739         } else {
1740                 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1741                 if (scmnd) {
1742                         req = (void *)scmnd->host_scribble;
1743                         scmnd = srp_claim_req(ch, req, NULL, scmnd);
1744                 }
1745                 if (!scmnd) {
1746                         shost_printk(KERN_ERR, target->scsi_host,
1747                                      "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1748                                      rsp->tag, ch - target->ch, ch->qp->qp_num);
1749
1750                         spin_lock_irqsave(&ch->lock, flags);
1751                         ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1752                         spin_unlock_irqrestore(&ch->lock, flags);
1753
1754                         return;
1755                 }
1756                 scmnd->result = rsp->status;
1757
1758                 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1759                         memcpy(scmnd->sense_buffer, rsp->data +
1760                                be32_to_cpu(rsp->resp_data_len),
1761                                min_t(int, be32_to_cpu(rsp->sense_data_len),
1762                                      SCSI_SENSE_BUFFERSIZE));
1763                 }
1764
1765                 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1766                         scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1767                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1768                         scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1769                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1770                         scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1771                 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1772                         scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1773
1774                 srp_free_req(ch, req, scmnd,
1775                              be32_to_cpu(rsp->req_lim_delta));
1776
1777                 scmnd->host_scribble = NULL;
1778                 scmnd->scsi_done(scmnd);
1779         }
1780 }
1781
1782 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1783                                void *rsp, int len)
1784 {
1785         struct srp_target_port *target = ch->target;
1786         struct ib_device *dev = target->srp_host->srp_dev->dev;
1787         unsigned long flags;
1788         struct srp_iu *iu;
1789         int err;
1790
1791         spin_lock_irqsave(&ch->lock, flags);
1792         ch->req_lim += req_delta;
1793         iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1794         spin_unlock_irqrestore(&ch->lock, flags);
1795
1796         if (!iu) {
1797                 shost_printk(KERN_ERR, target->scsi_host, PFX
1798                              "no IU available to send response\n");
1799                 return 1;
1800         }
1801
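        /*
         * The sync_for_cpu()/sync_for_device() pair below hands ownership
         * of the IU buffer to the CPU for the memcpy() and back to the HCA
         * before the response is posted.
         */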
1802         ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1803         memcpy(iu->buf, rsp, len);
1804         ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1805
1806         err = srp_post_send(ch, iu, len);
1807         if (err) {
1808                 shost_printk(KERN_ERR, target->scsi_host, PFX
1809                              "unable to post response: %d\n", err);
1810                 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1811         }
1812
1813         return err;
1814 }
1815
1816 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1817                                  struct srp_cred_req *req)
1818 {
1819         struct srp_cred_rsp rsp = {
1820                 .opcode = SRP_CRED_RSP,
1821                 .tag = req->tag,
1822         };
1823         s32 delta = be32_to_cpu(req->req_lim_delta);
1824
1825         if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1826                 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1827                              "problems processing SRP_CRED_REQ\n");
1828 }
1829
1830 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1831                                 struct srp_aer_req *req)
1832 {
1833         struct srp_target_port *target = ch->target;
1834         struct srp_aer_rsp rsp = {
1835                 .opcode = SRP_AER_RSP,
1836                 .tag = req->tag,
1837         };
1838         s32 delta = be32_to_cpu(req->req_lim_delta);
1839
1840         shost_printk(KERN_ERR, target->scsi_host, PFX
1841                      "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1842
1843         if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1844                 shost_printk(KERN_ERR, target->scsi_host, PFX
1845                              "problems processing SRP_AER_REQ\n");
1846 }
1847
1848 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1849 {
1850         struct srp_target_port *target = ch->target;
1851         struct ib_device *dev = target->srp_host->srp_dev->dev;
1852         struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1853         int res;
1854         u8 opcode;
1855
1856         ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1857                                    DMA_FROM_DEVICE);
1858
1859         opcode = *(u8 *) iu->buf;
1860
1861         if (0) {
1862                 shost_printk(KERN_ERR, target->scsi_host,
1863                              PFX "recv completion, opcode 0x%02x\n", opcode);
1864                 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1865                                iu->buf, wc->byte_len, true);
1866         }
1867
1868         switch (opcode) {
1869         case SRP_RSP:
1870                 srp_process_rsp(ch, iu->buf);
1871                 break;
1872
1873         case SRP_CRED_REQ:
1874                 srp_process_cred_req(ch, iu->buf);
1875                 break;
1876
1877         case SRP_AER_REQ:
1878                 srp_process_aer_req(ch, iu->buf);
1879                 break;
1880
1881         case SRP_T_LOGOUT:
1882                 /* XXX Handle target logout */
1883                 shost_printk(KERN_WARNING, target->scsi_host,
1884                              PFX "Got target logout request\n");
1885                 break;
1886
1887         default:
1888                 shost_printk(KERN_WARNING, target->scsi_host,
1889                              PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1890                 break;
1891         }
1892
1893         ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1894                                       DMA_FROM_DEVICE);
1895
1896         res = srp_post_recv(ch, iu);
1897         if (res != 0)
1898                 shost_printk(KERN_ERR, target->scsi_host,
1899                              PFX "Recv failed with error code %d\n", res);
1900 }
1901
1902 /**
1903  * srp_tl_err_work() - handle a transport layer error
1904  * @work: Work structure embedded in an SRP target port.
1905  *
1906  * Note: This function may get invoked before the rport has been created,
1907  * hence the target->rport test.
1908  */
1909 static void srp_tl_err_work(struct work_struct *work)
1910 {
1911         struct srp_target_port *target;
1912
1913         target = container_of(work, struct srp_target_port, tl_err_work);
1914         if (target->rport)
1915                 srp_start_tl_fail_timers(target->rport);
1916 }
1917
1918 static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1919                               bool send_err, struct srp_rdma_ch *ch)
1920 {
1921         struct srp_target_port *target = ch->target;
1922
1923         if (wr_id == SRP_LAST_WR_ID) {
1924                 complete(&ch->done);
1925                 return;
1926         }
1927
1928         if (ch->connected && !target->qp_in_error) {
1929                 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1930                         shost_printk(KERN_ERR, target->scsi_host, PFX
1931                                      "LOCAL_INV failed with status %s (%d)\n",
1932                                      ib_wc_status_msg(wc_status), wc_status);
1933                 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1934                         shost_printk(KERN_ERR, target->scsi_host, PFX
1935                                      "FAST_REG_MR failed status %s (%d)\n",
1936                                      ib_wc_status_msg(wc_status), wc_status);
1937                 } else {
1938                         shost_printk(KERN_ERR, target->scsi_host,
1939                                      PFX "failed %s status %s (%d) for iu %p\n",
1940                                      send_err ? "send" : "receive",
1941                                      ib_wc_status_msg(wc_status), wc_status,
1942                                      (void *)(uintptr_t)wr_id);
1943                 }
1944                 queue_work(system_long_wq, &target->tl_err_work);
1945         }
1946         target->qp_in_error = true;
1947 }
1948
1949 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1950 {
1951         struct srp_rdma_ch *ch = ch_ptr;
1952         struct ib_wc wc;
1953
1954         ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1955         while (ib_poll_cq(cq, 1, &wc) > 0) {
1956                 if (likely(wc.status == IB_WC_SUCCESS)) {
1957                         srp_handle_recv(ch, &wc);
1958                 } else {
1959                         srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1960                 }
1961         }
1962 }
1963
1964 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1965 {
1966         struct srp_rdma_ch *ch = ch_ptr;
1967         struct ib_wc wc;
1968         struct srp_iu *iu;
1969
1970         while (ib_poll_cq(cq, 1, &wc) > 0) {
1971                 if (likely(wc.status == IB_WC_SUCCESS)) {
1972                         iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1973                         list_add(&iu->list, &ch->free_tx);
1974                 } else {
1975                         srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1976                 }
1977         }
1978 }
1979
1980 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1981 {
1982         struct srp_target_port *target = host_to_target(shost);
1983         struct srp_rport *rport = target->rport;
1984         struct srp_rdma_ch *ch;
1985         struct srp_request *req;
1986         struct srp_iu *iu;
1987         struct srp_cmd *cmd;
1988         struct ib_device *dev;
1989         unsigned long flags;
1990         u32 tag;
1991         u16 idx;
1992         int len, ret;
1993         const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1994
1995         /*
1996          * The SCSI EH thread is the only context from which srp_queuecommand()
1997          * can get invoked for blocked devices (SDEV_BLOCK /
1998          * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1999          * locking the rport mutex if invoked from inside the SCSI EH.
2000          */
2001         if (in_scsi_eh)
2002                 mutex_lock(&rport->mutex);
2003
2004         scmnd->result = srp_chkready(target->rport);
2005         if (unlikely(scmnd->result))
2006                 goto err;
2007
2008         WARN_ON_ONCE(scmnd->request->tag < 0);
2009         tag = blk_mq_unique_tag(scmnd->request);
2010         ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2011         idx = blk_mq_unique_tag_to_tag(tag);
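        /*
         * blk_mq_unique_tag() encodes the hardware queue number in the
         * upper 16 bits and the per-queue tag in the lower 16 bits of the
         * unique tag, e.g. a (hypothetical) unique tag 0x20005 selects
         * ch = &target->ch[2] and idx = 5.
         */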
2012         WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2013                   dev_name(&shost->shost_gendev), tag, idx,
2014                   target->req_ring_size);
2015
2016         spin_lock_irqsave(&ch->lock, flags);
2017         iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2018         spin_unlock_irqrestore(&ch->lock, flags);
2019
2020         if (!iu)
2021                 goto err;
2022
2023         req = &ch->req_ring[idx];
2024         dev = target->srp_host->srp_dev->dev;
2025         ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2026                                    DMA_TO_DEVICE);
2027
2028         scmnd->host_scribble = (void *) req;
2029
2030         cmd = iu->buf;
2031         memset(cmd, 0, sizeof *cmd);
2032
2033         cmd->opcode = SRP_CMD;
2034         int_to_scsilun(scmnd->device->lun, &cmd->lun);
2035         cmd->tag    = tag;
2036         memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2037
2038         req->scmnd    = scmnd;
2039         req->cmd      = iu;
2040
2041         len = srp_map_data(scmnd, ch, req);
2042         if (len < 0) {
2043                 shost_printk(KERN_ERR, target->scsi_host,
2044                              PFX "Failed to map data (%d)\n", len);
2045                 /*
2046                  * If we ran out of memory descriptors (-ENOMEM) because an
2047                  * application is queuing many requests with more than
2048                  * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2049                  * to reduce queue depth temporarily.
2050                  */
2051                 scmnd->result = len == -ENOMEM ?
2052                         DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2053                 goto err_iu;
2054         }
2055
2056         ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2057                                       DMA_TO_DEVICE);
2058
2059         if (srp_post_send(ch, iu, len)) {
2060                 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2061                 goto err_unmap;
2062         }
2063
2064         ret = 0;
2065
2066 unlock_rport:
2067         if (in_scsi_eh)
2068                 mutex_unlock(&rport->mutex);
2069
2070         return ret;
2071
2072 err_unmap:
2073         srp_unmap_data(scmnd, ch, req);
2074
2075 err_iu:
2076         srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2077
2078         /*
2079          * Prevent the loops that iterate over the request ring from
2080          * encountering a dangling SCSI command pointer.
2081          */
2082         req->scmnd = NULL;
2083
2084 err:
2085         if (scmnd->result) {
2086                 scmnd->scsi_done(scmnd);
2087                 ret = 0;
2088         } else {
2089                 ret = SCSI_MLQUEUE_HOST_BUSY;
2090         }
2091
2092         goto unlock_rport;
2093 }
2094
2095 /*
2096  * Note: the resources allocated in this function are freed in
2097  * srp_free_ch_ib().
2098  */
2099 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2100 {
2101         struct srp_target_port *target = ch->target;
2102         int i;
2103
2104         ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2105                               GFP_KERNEL);
2106         if (!ch->rx_ring)
2107                 goto err_no_ring;
2108         ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2109                               GFP_KERNEL);
2110         if (!ch->tx_ring)
2111                 goto err_no_ring;
2112
2113         for (i = 0; i < target->queue_size; ++i) {
2114                 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2115                                               ch->max_ti_iu_len,
2116                                               GFP_KERNEL, DMA_FROM_DEVICE);
2117                 if (!ch->rx_ring[i])
2118                         goto err;
2119         }
2120
2121         for (i = 0; i < target->queue_size; ++i) {
2122                 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2123                                               target->max_iu_len,
2124                                               GFP_KERNEL, DMA_TO_DEVICE);
2125                 if (!ch->tx_ring[i])
2126                         goto err;
2127
2128                 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2129         }
2130
2131         return 0;
2132
2133 err:
2134         for (i = 0; i < target->queue_size; ++i) {
2135                 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2136                 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2137         }
2138
2140 err_no_ring:
2141         kfree(ch->tx_ring);
2142         ch->tx_ring = NULL;
2143         kfree(ch->rx_ring);
2144         ch->rx_ring = NULL;
2145
2146         return -ENOMEM;
2147 }
2148
2149 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2150 {
2151         uint64_t T_tr_ns, max_compl_time_ms;
2152         uint32_t rq_tmo_jiffies;
2153
2154         /*
2155          * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2156          * table 91), both the QP timeout and the retry count have to be set
2157          * for RC QP's during the RTR to RTS transition.
2158          */
2159         WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2160                      (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2161
2162         /*
2163          * Set target->rq_tmo_jiffies to one second more than the largest time
2164          * it can take before an error completion is generated. See also
2165          * C9-140..142 in the IBTA spec for more information about how to
2166          * convert the QP Local ACK Timeout value to nanoseconds.
2167          */
2168         T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2169         max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2170         do_div(max_compl_time_ms, NSEC_PER_MSEC);
2171         rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2172
2173         return rq_tmo_jiffies;
2174 }
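
/*
 * Worked example (illustrative values): with qp_attr->timeout = 19 and
 * qp_attr->retry_cnt = 7,
 *
 *	T_tr_ns           = 4096 * 2^19 ns              (about 2.15 s)
 *	max_compl_time_ms = 7 * 4 * T_tr_ns / 10^6      (about 60130 ms)
 *
 * so srp_compute_rq_tmo() returns roughly 61 seconds worth of jiffies.
 */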
2175
2176 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2177                                const struct srp_login_rsp *lrsp,
2178                                struct srp_rdma_ch *ch)
2179 {
2180         struct srp_target_port *target = ch->target;
2181         struct ib_qp_attr *qp_attr = NULL;
2182         int attr_mask = 0;
2183         int ret;
2184         int i;
2185
2186         if (lrsp->opcode == SRP_LOGIN_RSP) {
2187                 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2188                 ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);
2189
2190                 /*
2191                  * Reserve credits for task management so we don't
2192                  * bounce requests back to the SCSI mid-layer.
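                 * For example (numbers are illustrative), if the login
                 * response grants req_lim = 64 credits, can_queue is capped
                 * at 64 - SRP_TSK_MGMT_SQ_SIZE so that __srp_get_tx_iu() can
                 * always hand out an IU for a task management request.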
2193                  */
2194                 target->scsi_host->can_queue
2195                         = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2196                               target->scsi_host->can_queue);
2197                 target->scsi_host->cmd_per_lun
2198                         = min_t(int, target->scsi_host->can_queue,
2199                                 target->scsi_host->cmd_per_lun);
2200         } else {
2201                 shost_printk(KERN_WARNING, target->scsi_host,
2202                              PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2203                 ret = -ECONNRESET;
2204                 goto error;
2205         }
2206
2207         if (!ch->rx_ring) {
2208                 ret = srp_alloc_iu_bufs(ch);
2209                 if (ret)
2210                         goto error;
2211         }
2212
2213         ret = -ENOMEM;
2214         qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2215         if (!qp_attr)
2216                 goto error;
2217
2218         qp_attr->qp_state = IB_QPS_RTR;
2219         ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2220         if (ret)
2221                 goto error_free;
2222
2223         ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2224         if (ret)
2225                 goto error_free;
2226
2227         for (i = 0; i < target->queue_size; i++) {
2228                 struct srp_iu *iu = ch->rx_ring[i];
2229
2230                 ret = srp_post_recv(ch, iu);
2231                 if (ret)
2232                         goto error_free;
2233         }
2234
2235         qp_attr->qp_state = IB_QPS_RTS;
2236         ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2237         if (ret)
2238                 goto error_free;
2239
2240         target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2241
2242         ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2243         if (ret)
2244                 goto error_free;
2245
2246         ret = ib_send_cm_rtu(cm_id, NULL, 0);
2247
2248 error_free:
2249         kfree(qp_attr);
2250
2251 error:
2252         ch->status = ret;
2253 }
2254
2255 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2256                                struct ib_cm_event *event,
2257                                struct srp_rdma_ch *ch)
2258 {
2259         struct srp_target_port *target = ch->target;
2260         struct Scsi_Host *shost = target->scsi_host;
2261         struct ib_class_port_info *cpi;
2262         int opcode;
2263
2264         switch (event->param.rej_rcvd.reason) {
2265         case IB_CM_REJ_PORT_CM_REDIRECT:
2266                 cpi = event->param.rej_rcvd.ari;
2267                 ch->path.dlid = cpi->redirect_lid;
2268                 ch->path.pkey = cpi->redirect_pkey;
2269                 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2270                 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2271
2272                 ch->status = ch->path.dlid ?
2273                         SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2274                 break;
2275
2276         case IB_CM_REJ_PORT_REDIRECT:
2277                 if (srp_target_is_topspin(target)) {
2278                         /*
2279                          * Topspin/Cisco SRP gateways incorrectly send
2280                          * reject reason code 25 when they mean 24
2281                          * (port redirect).
2282                          */
2283                         memcpy(ch->path.dgid.raw,
2284                                event->param.rej_rcvd.ari, 16);
2285
2286                         shost_printk(KERN_DEBUG, shost,
2287                                      PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2288                                      be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2289                                      be64_to_cpu(ch->path.dgid.global.interface_id));
2290
2291                         ch->status = SRP_PORT_REDIRECT;
2292                 } else {
2293                         shost_printk(KERN_WARNING, shost,
2294                                      "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2295                         ch->status = -ECONNRESET;
2296                 }
2297                 break;
2298
2299         case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2300                 shost_printk(KERN_WARNING, shost,
2301                             "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2302                 ch->status = -ECONNRESET;
2303                 break;
2304
2305         case IB_CM_REJ_CONSUMER_DEFINED:
2306                 opcode = *(u8 *) event->private_data;
2307                 if (opcode == SRP_LOGIN_REJ) {
2308                         struct srp_login_rej *rej = event->private_data;
2309                         u32 reason = be32_to_cpu(rej->reason);
2310
2311                         if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2312                                 shost_printk(KERN_WARNING, shost,
2313                                              PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2314                         else
2315                                 shost_printk(KERN_WARNING, shost, PFX
2316                                              "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2317                                              target->sgid.raw,
2318                                              target->orig_dgid.raw, reason);
2319                 } else {
2320                         shost_printk(KERN_WARNING, shost,
2321                                      "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n", opcode);
2322                 }
2323                 ch->status = -ECONNRESET;
2324                 break;
2325
2326         case IB_CM_REJ_STALE_CONN:
2327                 shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
2328                 ch->status = SRP_STALE_CONN;
2329                 break;
2330
2331         default:
2332                 shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
2333                              event->param.rej_rcvd.reason);
2334                 ch->status = -ECONNRESET;
2335         }
2336 }
2337
2338 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2339 {
2340         struct srp_rdma_ch *ch = cm_id->context;
2341         struct srp_target_port *target = ch->target;
2342         int comp = 0;
2343
2344         switch (event->event) {
2345         case IB_CM_REQ_ERROR:
2346                 shost_printk(KERN_DEBUG, target->scsi_host,
2347                              PFX "Sending CM REQ failed\n");
2348                 comp = 1;
2349                 ch->status = -ECONNRESET;
2350                 break;
2351
2352         case IB_CM_REP_RECEIVED:
2353                 comp = 1;
2354                 srp_cm_rep_handler(cm_id, event->private_data, ch);
2355                 break;
2356
2357         case IB_CM_REJ_RECEIVED:
2358                 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2359                 comp = 1;
2360
2361                 srp_cm_rej_handler(cm_id, event, ch);
2362                 break;
2363
2364         case IB_CM_DREQ_RECEIVED:
2365                 shost_printk(KERN_WARNING, target->scsi_host,
2366                              PFX "DREQ received - connection closed\n");
2367                 ch->connected = false;
2368                 if (ib_send_cm_drep(cm_id, NULL, 0))
2369                         shost_printk(KERN_ERR, target->scsi_host,
2370                                      PFX "Sending CM DREP failed\n");
2371                 queue_work(system_long_wq, &target->tl_err_work);
2372                 break;
2373
2374         case IB_CM_TIMEWAIT_EXIT:
2375                 shost_printk(KERN_ERR, target->scsi_host,
2376                              PFX "connection closed\n");
2377                 comp = 1;
2378
2379                 ch->status = 0;
2380                 break;
2381
2382         case IB_CM_MRA_RECEIVED:
2383         case IB_CM_DREQ_ERROR:
2384         case IB_CM_DREP_RECEIVED:
2385                 break;
2386
2387         default:
2388                 shost_printk(KERN_WARNING, target->scsi_host,
2389                              PFX "Unhandled CM event %d\n", event->event);
2390                 break;
2391         }
2392
2393         if (comp)
2394                 complete(&ch->done);
2395
2396         return 0;
2397 }
2398
2399 /**
2400  * srp_change_queue_depth - set the device queue depth
2401  * @sdev: scsi device struct
2402  * @qdepth: requested queue depth
2403  *
2404  * Returns the new queue depth.
2405  */
2406 static int
2407 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2408 {
2409         if (!sdev->tagged_supported)
2410                 qdepth = 1;
2411         return scsi_change_queue_depth(sdev, qdepth);
2412 }
2413
2414 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2415                              u8 func)
2416 {
2417         struct srp_target_port *target = ch->target;
2418         struct srp_rport *rport = target->rport;
2419         struct ib_device *dev = target->srp_host->srp_dev->dev;
2420         struct srp_iu *iu;
2421         struct srp_tsk_mgmt *tsk_mgmt;
2422
2423         if (!ch->connected || target->qp_in_error)
2424                 return -1;
2425
2426         init_completion(&ch->tsk_mgmt_done);
2427
2428         /*
2429          * Lock the rport mutex to prevent srp_create_ch_ib() from being
2430          * invoked while a task management function is being sent.
2431          */
2432         mutex_lock(&rport->mutex);
2433         spin_lock_irq(&ch->lock);
2434         iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2435         spin_unlock_irq(&ch->lock);
2436
2437         if (!iu) {
2438                 mutex_unlock(&rport->mutex);
2439
2440                 return -1;
2441         }
2442
2443         ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2444                                    DMA_TO_DEVICE);
2445         tsk_mgmt = iu->buf;
2446         memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2447
2448         tsk_mgmt->opcode        = SRP_TSK_MGMT;
2449         int_to_scsilun(lun, &tsk_mgmt->lun);
2450         tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
2451         tsk_mgmt->tsk_mgmt_func = func;
2452         tsk_mgmt->task_tag      = req_tag;
2453
2454         ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2455                                       DMA_TO_DEVICE);
2456         if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2457                 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2458                 mutex_unlock(&rport->mutex);
2459
2460                 return -1;
2461         }
2462         mutex_unlock(&rport->mutex);
2463
2464         if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2465                                          msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
2466                 return -1;
2467
2468         return 0;
2469 }
2470
2471 static int srp_abort(struct scsi_cmnd *scmnd)
2472 {
2473         struct srp_target_port *target = host_to_target(scmnd->device->host);
2474         struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2475         u32 tag;
2476         u16 ch_idx;
2477         struct srp_rdma_ch *ch;
2478         int ret;
2479
2480         shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2481
2482         if (!req)
2483                 return SUCCESS;
2484         tag = blk_mq_unique_tag(scmnd->request);
2485         ch_idx = blk_mq_unique_tag_to_hwq(tag);
2486         if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2487                 return SUCCESS;
2488         ch = &target->ch[ch_idx];
2489         if (!srp_claim_req(ch, req, NULL, scmnd))
2490                 return SUCCESS;
2491         shost_printk(KERN_ERR, target->scsi_host,
2492                      "Sending SRP abort for tag %#x\n", tag);
2493         if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2494                               SRP_TSK_ABORT_TASK) == 0)
2495                 ret = SUCCESS;
2496         else if (target->rport->state == SRP_RPORT_LOST)
2497                 ret = FAST_IO_FAIL;
2498         else
2499                 ret = FAILED;
2500         srp_free_req(ch, req, scmnd, 0);
2501         scmnd->result = DID_ABORT << 16;
2502         scmnd->scsi_done(scmnd);
2503
2504         return ret;
2505 }
2506
2507 static int srp_reset_device(struct scsi_cmnd *scmnd)
2508 {
2509         struct srp_target_port *target = host_to_target(scmnd->device->host);
2510         struct srp_rdma_ch *ch;
2511         int i, j;
2512
2513         shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2514
2515         ch = &target->ch[0];
2516         if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2517                               SRP_TSK_LUN_RESET))
2518                 return FAILED;
2519         if (ch->tsk_mgmt_status)
2520                 return FAILED;
2521
2522         for (i = 0; i < target->ch_count; i++) {
2523                 ch = &target->ch[i];
2524                 for (j = 0; j < target->req_ring_size; ++j) {
2525                         struct srp_request *req = &ch->req_ring[j];
2526
2527                         srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2528                 }
2529         }
2530
2531         return SUCCESS;
2532 }
2533
2534 static int srp_reset_host(struct scsi_cmnd *scmnd)
2535 {
2536         struct srp_target_port *target = host_to_target(scmnd->device->host);
2537
2538         shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2539
2540         return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2541 }
2542
2543 static int srp_slave_configure(struct scsi_device *sdev)
2544 {
2545         struct Scsi_Host *shost = sdev->host;
2546         struct srp_target_port *target = host_to_target(shost);
2547         struct request_queue *q = sdev->request_queue;
2548         unsigned long timeout;
2549
2550         if (sdev->type == TYPE_DISK) {
2551                 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2552                 blk_queue_rq_timeout(q, timeout);
2553         }
2554
2555         return 0;
2556 }
2557
2558 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2559                            char *buf)
2560 {
2561         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2562
2563         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2564 }
2565
2566 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2567                              char *buf)
2568 {
2569         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2570
2571         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2572 }
2573
2574 static ssize_t show_service_id(struct device *dev,
2575                                struct device_attribute *attr, char *buf)
2576 {
2577         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2578
2579         return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2580 }
2581
2582 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2583                          char *buf)
2584 {
2585         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2586
2587         return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2588 }
2589
2590 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2591                          char *buf)
2592 {
2593         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2594
2595         return sprintf(buf, "%pI6\n", target->sgid.raw);
2596 }
2597
2598 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2599                          char *buf)
2600 {
2601         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2602         struct srp_rdma_ch *ch = &target->ch[0];
2603
2604         return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2605 }
2606
2607 static ssize_t show_orig_dgid(struct device *dev,
2608                               struct device_attribute *attr, char *buf)
2609 {
2610         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2611
2612         return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2613 }
2614
2615 static ssize_t show_req_lim(struct device *dev,
2616                             struct device_attribute *attr, char *buf)
2617 {
2618         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2619         struct srp_rdma_ch *ch;
2620         int i, req_lim = INT_MAX;
2621
2622         for (i = 0; i < target->ch_count; i++) {
2623                 ch = &target->ch[i];
2624                 req_lim = min(req_lim, ch->req_lim);
2625         }
2626         return sprintf(buf, "%d\n", req_lim);
2627 }
2628
2629 static ssize_t show_zero_req_lim(struct device *dev,
2630                                  struct device_attribute *attr, char *buf)
2631 {
2632         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2633
2634         return sprintf(buf, "%d\n", target->zero_req_lim);
2635 }
2636
2637 static ssize_t show_local_ib_port(struct device *dev,
2638                                   struct device_attribute *attr, char *buf)
2639 {
2640         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2641
2642         return sprintf(buf, "%d\n", target->srp_host->port);
2643 }
2644
2645 static ssize_t show_local_ib_device(struct device *dev,
2646                                     struct device_attribute *attr, char *buf)
2647 {
2648         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2649
2650         return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2651 }
2652
2653 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2654                              char *buf)
2655 {
2656         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2657
2658         return sprintf(buf, "%d\n", target->ch_count);
2659 }
2660
2661 static ssize_t show_comp_vector(struct device *dev,
2662                                 struct device_attribute *attr, char *buf)
2663 {
2664         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2665
2666         return sprintf(buf, "%d\n", target->comp_vector);
2667 }
2668
2669 static ssize_t show_tl_retry_count(struct device *dev,
2670                                    struct device_attribute *attr, char *buf)
2671 {
2672         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2673
2674         return sprintf(buf, "%d\n", target->tl_retry_count);
2675 }
2676
2677 static ssize_t show_cmd_sg_entries(struct device *dev,
2678                                    struct device_attribute *attr, char *buf)
2679 {
2680         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2681
2682         return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2683 }
2684
2685 static ssize_t show_allow_ext_sg(struct device *dev,
2686                                  struct device_attribute *attr, char *buf)
2687 {
2688         struct srp_target_port *target = host_to_target(class_to_shost(dev));
2689
2690         return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2691 }
2692
2693 static DEVICE_ATTR(id_ext,          S_IRUGO, show_id_ext,          NULL);
2694 static DEVICE_ATTR(ioc_guid,        S_IRUGO, show_ioc_guid,        NULL);
2695 static DEVICE_ATTR(service_id,      S_IRUGO, show_service_id,      NULL);
2696 static DEVICE_ATTR(pkey,            S_IRUGO, show_pkey,            NULL);
2697 static DEVICE_ATTR(sgid,            S_IRUGO, show_sgid,            NULL);
2698 static DEVICE_ATTR(dgid,            S_IRUGO, show_dgid,            NULL);
2699 static DEVICE_ATTR(orig_dgid,       S_IRUGO, show_orig_dgid,       NULL);
2700 static DEVICE_ATTR(req_lim,         S_IRUGO, show_req_lim,         NULL);
2701 static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,    NULL);
2702 static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
2703 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2704 static DEVICE_ATTR(ch_count,        S_IRUGO, show_ch_count,        NULL);
2705 static DEVICE_ATTR(comp_vector,     S_IRUGO, show_comp_vector,     NULL);
2706 static DEVICE_ATTR(tl_retry_count,  S_IRUGO, show_tl_retry_count,  NULL);
2707 static DEVICE_ATTR(cmd_sg_entries,  S_IRUGO, show_cmd_sg_entries,  NULL);
2708 static DEVICE_ATTR(allow_ext_sg,    S_IRUGO, show_allow_ext_sg,    NULL);
2709
2710 static struct device_attribute *srp_host_attrs[] = {
2711         &dev_attr_id_ext,
2712         &dev_attr_ioc_guid,
2713         &dev_attr_service_id,
2714         &dev_attr_pkey,
2715         &dev_attr_sgid,
2716         &dev_attr_dgid,
2717         &dev_attr_orig_dgid,
2718         &dev_attr_req_lim,
2719         &dev_attr_zero_req_lim,
2720         &dev_attr_local_ib_port,
2721         &dev_attr_local_ib_device,
2722         &dev_attr_ch_count,
2723         &dev_attr_comp_vector,
2724         &dev_attr_tl_retry_count,
2725         &dev_attr_cmd_sg_entries,
2726         &dev_attr_allow_ext_sg,
2727         NULL
2728 };
2729
2730 static struct scsi_host_template srp_template = {
2731         .module                         = THIS_MODULE,
2732         .name                           = "InfiniBand SRP initiator",
2733         .proc_name                      = DRV_NAME,
2734         .slave_configure                = srp_slave_configure,
2735         .info                           = srp_target_info,
2736         .queuecommand                   = srp_queuecommand,
2737         .change_queue_depth             = srp_change_queue_depth,
2738         .eh_abort_handler               = srp_abort,
2739         .eh_device_reset_handler        = srp_reset_device,
2740         .eh_host_reset_handler          = srp_reset_host,
2741         .skip_settle_delay              = true,
2742         .sg_tablesize                   = SRP_DEF_SG_TABLESIZE,
2743         .can_queue                      = SRP_DEFAULT_CMD_SQ_SIZE,
2744         .this_id                        = -1,
2745         .cmd_per_lun                    = SRP_DEFAULT_CMD_SQ_SIZE,
2746         .use_clustering                 = ENABLE_CLUSTERING,
2747         .shost_attrs                    = srp_host_attrs,
2748         .use_blk_tags                   = 1,
2749         .track_queue_depth              = 1,
2750 };
2751
2752 static int srp_sdev_count(struct Scsi_Host *host)
2753 {
2754         struct scsi_device *sdev;
2755         int c = 0;
2756
2757         shost_for_each_device(sdev, host)
2758                 c++;
2759
2760         return c;
2761 }
2762
2763 /*
2764  * Return values:
2765  * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2766  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2767  *    removal has been scheduled.
2768  * 0 and target->state != SRP_TARGET_REMOVED upon success.
2769  */
2770 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2771 {
2772         struct srp_rport_identifiers ids;
2773         struct srp_rport *rport;
2774
2775         target->state = SRP_TARGET_SCANNING;
2776         sprintf(target->target_name, "SRP.T10:%016llX",
2777                 be64_to_cpu(target->id_ext));
2778
2779         if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2780                 return -ENODEV;
2781
2782         memcpy(ids.port_id, &target->id_ext, 8);
2783         memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2784         ids.roles = SRP_RPORT_ROLE_TARGET;
2785         rport = srp_rport_add(target->scsi_host, &ids);
2786         if (IS_ERR(rport)) {
2787                 scsi_remove_host(target->scsi_host);
2788                 return PTR_ERR(rport);
2789         }
2790
2791         rport->lld_data = target;
2792         target->rport = rport;
2793
2794         spin_lock(&host->target_lock);
2795         list_add_tail(&target->list, &host->target_list);
2796         spin_unlock(&host->target_lock);
2797
2798         scsi_scan_target(&target->scsi_host->shost_gendev,
2799                          0, target->scsi_id, SCAN_WILD_CARD, 0);
2800
2801         if (srp_connected_ch(target) < target->ch_count ||
2802             target->qp_in_error) {
2803                 shost_printk(KERN_INFO, target->scsi_host,
2804                              PFX "SCSI scan failed - removing SCSI host\n");
2805                 srp_queue_remove_work(target);
2806                 goto out;
2807         }
2808
2809         pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
2810                  dev_name(&target->scsi_host->shost_gendev),
2811                  srp_sdev_count(target->scsi_host));
2812
2813         spin_lock_irq(&target->lock);
2814         if (target->state == SRP_TARGET_SCANNING)
2815                 target->state = SRP_TARGET_LIVE;
2816         spin_unlock_irq(&target->lock);
2817
2818 out:
2819         return 0;
2820 }
2821
2822 static void srp_release_dev(struct device *dev)
2823 {
2824         struct srp_host *host =
2825                 container_of(dev, struct srp_host, dev);
2826
2827         complete(&host->released);
2828 }
2829
2830 static struct class srp_class = {
2831         .name    = "infiniband_srp",
2832         .dev_release = srp_release_dev
2833 };
2834
2835 /**
2836  * srp_conn_unique() - check whether the connection to a target is unique
2837  * @host:   SRP host.
2838  * @target: SRP target port.
2839  */
2840 static bool srp_conn_unique(struct srp_host *host,
2841                             struct srp_target_port *target)
2842 {
2843         struct srp_target_port *t;
2844         bool ret = false;
2845
2846         if (target->state == SRP_TARGET_REMOVED)
2847                 goto out;
2848
2849         ret = true;
2850
2851         spin_lock(&host->target_lock);
2852         list_for_each_entry(t, &host->target_list, list) {
2853                 if (t != target &&
2854                     target->id_ext == t->id_ext &&
2855                     target->ioc_guid == t->ioc_guid &&
2856                     target->initiator_ext == t->initiator_ext) {
2857                         ret = false;
2858                         break;
2859                 }
2860         }
2861         spin_unlock(&host->target_lock);
2862
2863 out:
2864         return ret;
2865 }
2866
2867 /*
2868  * Target ports are added by writing
2869  *
2870  *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2871  *     pkey=<P_Key>,service_id=<service ID>
2872  *
2873  * to the add_target sysfs attribute.
2874  */
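
/*
 * Example (illustrative; the device name, port number and identifiers
 * depend on the local HCA and on the target):
 *
 *	echo id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4 \
 *		> /sys/class/infiniband_srp/srp-mlx4_0-1/add_target
 */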
2875 enum {
2876         SRP_OPT_ERR             = 0,
2877         SRP_OPT_ID_EXT          = 1 << 0,
2878         SRP_OPT_IOC_GUID        = 1 << 1,
2879         SRP_OPT_DGID            = 1 << 2,
2880         SRP_OPT_PKEY            = 1 << 3,
2881         SRP_OPT_SERVICE_ID      = 1 << 4,
2882         SRP_OPT_MAX_SECT        = 1 << 5,
2883         SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2884         SRP_OPT_IO_CLASS        = 1 << 7,
2885         SRP_OPT_INITIATOR_EXT   = 1 << 8,
2886         SRP_OPT_CMD_SG_ENTRIES  = 1 << 9,
2887         SRP_OPT_ALLOW_EXT_SG    = 1 << 10,
2888         SRP_OPT_SG_TABLESIZE    = 1 << 11,
2889         SRP_OPT_COMP_VECTOR     = 1 << 12,
2890         SRP_OPT_TL_RETRY_COUNT  = 1 << 13,
2891         SRP_OPT_QUEUE_SIZE      = 1 << 14,
2892         SRP_OPT_ALL             = (SRP_OPT_ID_EXT       |
2893                                    SRP_OPT_IOC_GUID     |
2894                                    SRP_OPT_DGID         |
2895                                    SRP_OPT_PKEY         |
2896                                    SRP_OPT_SERVICE_ID),
2897 };
2898
2899 static const match_table_t srp_opt_tokens = {
2900         { SRP_OPT_ID_EXT,               "id_ext=%s"             },
2901         { SRP_OPT_IOC_GUID,             "ioc_guid=%s"           },
2902         { SRP_OPT_DGID,                 "dgid=%s"               },
2903         { SRP_OPT_PKEY,                 "pkey=%x"               },
2904         { SRP_OPT_SERVICE_ID,           "service_id=%s"         },
2905         { SRP_OPT_MAX_SECT,             "max_sect=%d"           },
2906         { SRP_OPT_MAX_CMD_PER_LUN,      "max_cmd_per_lun=%d"    },
2907         { SRP_OPT_IO_CLASS,             "io_class=%x"           },
2908         { SRP_OPT_INITIATOR_EXT,        "initiator_ext=%s"      },
2909         { SRP_OPT_CMD_SG_ENTRIES,       "cmd_sg_entries=%u"     },
2910         { SRP_OPT_ALLOW_EXT_SG,         "allow_ext_sg=%u"       },
2911         { SRP_OPT_SG_TABLESIZE,         "sg_tablesize=%u"       },
2912         { SRP_OPT_COMP_VECTOR,          "comp_vector=%u"        },
2913         { SRP_OPT_TL_RETRY_COUNT,       "tl_retry_count=%u"     },
2914         { SRP_OPT_QUEUE_SIZE,           "queue_size=%d"         },
2915         { SRP_OPT_ERR,                  NULL                    }
2916 };
2917
2918 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2919 {
2920         char *options, *sep_opt;
2921         char *p;
2922         char dgid[3];
2923         substring_t args[MAX_OPT_ARGS];
2924         int opt_mask = 0;
2925         int token;
2926         int ret = -EINVAL;
2927         int i;
2928
2929         options = kstrdup(buf, GFP_KERNEL);
2930         if (!options)
2931                 return -ENOMEM;
2932
2933         sep_opt = options;
2934         while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2935                 if (!*p)
2936                         continue;
2937
2938                 token = match_token(p, srp_opt_tokens, args);
2939                 opt_mask |= token;
2940
2941                 switch (token) {
2942                 case SRP_OPT_ID_EXT:
2943                         p = match_strdup(args);
2944                         if (!p) {
2945                                 ret = -ENOMEM;
2946                                 goto out;
2947                         }
2948                         target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2949                         kfree(p);
2950                         break;
2951
2952                 case SRP_OPT_IOC_GUID:
2953                         p = match_strdup(args);
2954                         if (!p) {
2955                                 ret = -ENOMEM;
2956                                 goto out;
2957                         }
2958                         target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2959                         kfree(p);
2960                         break;
2961
2962                 case SRP_OPT_DGID:
2963                         p = match_strdup(args);
2964                         if (!p) {
2965                                 ret = -ENOMEM;
2966                                 goto out;
2967                         }
2968                         if (strlen(p) != 32) {
2969                                 pr_warn("bad dest GID parameter '%s'\n", p);
2970                                 kfree(p);
2971                                 goto out;
2972                         }
2973
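                        /* Convert the 32 hex characters, two at a time, into the 16 GID bytes. */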
2974                         for (i = 0; i < 16; ++i) {
2975                                 strlcpy(dgid, p + i * 2, sizeof(dgid));
2976                                 if (sscanf(dgid, "%hhx",
2977                                            &target->orig_dgid.raw[i]) < 1) {
2978                                         ret = -EINVAL;
2979                                         kfree(p);
2980                                         goto out;
2981                                 }
2982                         }
2983                         kfree(p);
2984                         break;
2985
2986                 case SRP_OPT_PKEY:
2987                         if (match_hex(args, &token)) {
2988                                 pr_warn("bad P_Key parameter '%s'\n", p);
2989                                 goto out;
2990                         }
2991                         target->pkey = cpu_to_be16(token);
2992                         break;
2993
2994                 case SRP_OPT_SERVICE_ID:
2995                         p = match_strdup(args);
2996                         if (!p) {
2997                                 ret = -ENOMEM;
2998                                 goto out;
2999                         }
3000                         target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3001                         kfree(p);
3002                         break;
3003
3004                 case SRP_OPT_MAX_SECT:
3005                         if (match_int(args, &token)) {
3006                                 pr_warn("bad max sect parameter '%s'\n", p);
3007                                 goto out;
3008                         }
3009                         target->scsi_host->max_sectors = token;
3010                         break;
3011
3012                 case SRP_OPT_QUEUE_SIZE:
3013                         if (match_int(args, &token) || token < 1) {
3014                                 pr_warn("bad queue_size parameter '%s'\n", p);
3015                                 goto out;
3016                         }
3017                         target->scsi_host->can_queue = token;
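                        /* Reserve extra queue slots for response and task mgmt IUs. */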
3018                         target->queue_size = token + SRP_RSP_SQ_SIZE +
3019                                              SRP_TSK_MGMT_SQ_SIZE;
3020                         if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3021                                 target->scsi_host->cmd_per_lun = token;
3022                         break;
3023
3024                 case SRP_OPT_MAX_CMD_PER_LUN:
3025                         if (match_int(args, &token) || token < 1) {
3026                                 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3027                                         p);
3028                                 goto out;
3029                         }
3030                         target->scsi_host->cmd_per_lun = token;
3031                         break;
3032
3033                 case SRP_OPT_IO_CLASS:
3034                         if (match_hex(args, &token)) {
3035                                 pr_warn("bad IO class parameter '%s'\n", p);
3036                                 goto out;
3037                         }
3038                         if (token != SRP_REV10_IB_IO_CLASS &&
3039                             token != SRP_REV16A_IB_IO_CLASS) {
3040                                 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3041                                         token, SRP_REV10_IB_IO_CLASS,
3042                                         SRP_REV16A_IB_IO_CLASS);
3043                                 goto out;
3044                         }
3045                         target->io_class = token;
3046                         break;
3047
3048                 case SRP_OPT_INITIATOR_EXT:
3049                         p = match_strdup(args);
3050                         if (!p) {
3051                                 ret = -ENOMEM;
3052                                 goto out;
3053                         }
3054                         target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3055                         kfree(p);
3056                         break;
3057
3058                 case SRP_OPT_CMD_SG_ENTRIES:
3059                         if (match_int(args, &token) || token < 1 || token > 255) {
3060                                 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3061                                         p);
3062                                 goto out;
3063                         }
3064                         target->cmd_sg_cnt = token;
3065                         break;
3066
3067                 case SRP_OPT_ALLOW_EXT_SG:
3068                         if (match_int(args, &token)) {
3069                                 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3070                                 goto out;
3071                         }
3072                         target->allow_ext_sg = !!token;
3073                         break;
3074
3075                 case SRP_OPT_SG_TABLESIZE:
3076                         if (match_int(args, &token) || token < 1 ||
3077                                         token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3078                                 pr_warn("bad max sg_tablesize parameter '%s'\n",
3079                                         p);
3080                                 goto out;
3081                         }
3082                         target->sg_tablesize = token;
3083                         break;
3084
3085                 case SRP_OPT_COMP_VECTOR:
3086                         if (match_int(args, &token) || token < 0) {
3087                                 pr_warn("bad comp_vector parameter '%s'\n", p);
3088                                 goto out;
3089                         }
3090                         target->comp_vector = token;
3091                         break;
3092
3093                 case SRP_OPT_TL_RETRY_COUNT:
3094                         if (match_int(args, &token) || token < 2 || token > 7) {
3095                                 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3096                                         p);
3097                                 goto out;
3098                         }
3099                         target->tl_retry_count = token;
3100                         break;
3101
3102                 default:
3103                         pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3104                                 p);
3105                         goto out;
3106                 }
3107         }
3108
3109         if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3110                 ret = 0;
3111         else
3112                 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3113                         if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3114                             !(srp_opt_tokens[i].token & opt_mask))
3115                                 pr_warn("target creation request is missing parameter '%s'\n",
3116                                         srp_opt_tokens[i].pattern);
3117
3118         if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3119             && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3120                 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3121                         target->scsi_host->cmd_per_lun,
3122                         target->scsi_host->can_queue);
3123
3124 out:
3125         kfree(options);
3126         return ret;
3127 }
3128
3129 static ssize_t srp_create_target(struct device *dev,
3130                                  struct device_attribute *attr,
3131                                  const char *buf, size_t count)
3132 {
3133         struct srp_host *host =
3134                 container_of(dev, struct srp_host, dev);
3135         struct Scsi_Host *target_host;
3136         struct srp_target_port *target;
3137         struct srp_rdma_ch *ch;
3138         struct srp_device *srp_dev = host->srp_dev;
3139         struct ib_device *ibdev = srp_dev->dev;
3140         int ret, node_idx, node, cpu, i;
3141         bool multich = false;
3142
3143         target_host = scsi_host_alloc(&srp_template,
3144                                       sizeof (struct srp_target_port));
3145         if (!target_host)
3146                 return -ENOMEM;
3147
3148         target_host->transportt  = ib_srp_transport_template;
3149         target_host->max_channel = 0;
3150         target_host->max_id      = 1;
3151         target_host->max_lun     = -1LL;
3152         target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3153
3154         target = host_to_target(target_host);
3155
3156         target->io_class        = SRP_REV16A_IB_IO_CLASS;
3157         target->scsi_host       = target_host;
3158         target->srp_host        = host;
3159         target->lkey            = host->srp_dev->pd->local_dma_lkey;
3160         target->rkey            = host->srp_dev->mr->rkey;
3161         target->cmd_sg_cnt      = cmd_sg_entries;
3162         target->sg_tablesize    = indirect_sg_entries ? : cmd_sg_entries;
3163         target->allow_ext_sg    = allow_ext_sg;
3164         target->tl_retry_count  = 7;
3165         target->queue_size      = SRP_DEFAULT_QUEUE_SIZE;
3166
3167         /*
3168          * Prevent the SCSI host from being removed by srp_remove_target()
3169          * before this function returns.
3170          */
3171         scsi_host_get(target->scsi_host);
3172
3173         mutex_lock(&host->add_target_mutex);
3174
3175         ret = srp_parse_options(buf, target);
3176         if (ret)
3177                 goto out;
3178
3179         ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3180         if (ret)
3181                 goto out;
3182
3183         target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3184
3185         if (!srp_conn_unique(target->srp_host, target)) {
3186                 shost_printk(KERN_INFO, target->scsi_host,
3187                              PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3188                              be64_to_cpu(target->id_ext),
3189                              be64_to_cpu(target->ioc_guid),
3190                              be64_to_cpu(target->initiator_ext));
3191                 ret = -EEXIST;
3192                 goto out;
3193         }
3194
3195         if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3196             target->cmd_sg_cnt < target->sg_tablesize) {
3197                 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3198                 target->sg_tablesize = target->cmd_sg_cnt;
3199         }
3200
3201         target_host->sg_tablesize = target->sg_tablesize;
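        /*
         * indirect_size is the size of a descriptor table with sg_tablesize
         * entries; max_iu_len sizes the information unit so that it can hold
         * the SRP command, an indirect buffer descriptor and cmd_sg_cnt
         * direct descriptors.
         */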
3202         target->indirect_size = target->sg_tablesize *
3203                                 sizeof (struct srp_direct_buf);
3204         target->max_iu_len = sizeof (struct srp_cmd) +
3205                              sizeof (struct srp_indirect_buf) +
3206                              target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3207
3208         INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3209         INIT_WORK(&target->remove_work, srp_remove_work);
3210         spin_lock_init(&target->lock);
3211         ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3212         if (ret)
3213                 goto out;
3214
3215         ret = -ENOMEM;
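        /*
         * Channel count heuristic: at least one channel per NUMA node and at
         * most one channel per online CPU; unless a nonzero ch_count has
         * been configured, also cap the count at four channels per node and
         * at the number of completion vectors provided by the HCA.
         */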
3216         target->ch_count = max_t(unsigned, num_online_nodes(),
3217                                  min(ch_count ? :
3218                                      min(4 * num_online_nodes(),
3219                                          ibdev->num_comp_vectors),
3220                                      num_online_cpus()));
3221         target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3222                              GFP_KERNEL);
3223         if (!target->ch)
3224                 goto out;
3225
3226         node_idx = 0;
3227         for_each_online_node(node) {
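                /*
                 * Spread the channels and the completion vectors evenly over
                 * the online NUMA nodes; target->comp_vector only shifts the
                 * starting point within the device's completion vector range.
                 */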
3228                 const int ch_start = (node_idx * target->ch_count /
3229                                       num_online_nodes());
3230                 const int ch_end = ((node_idx + 1) * target->ch_count /
3231                                     num_online_nodes());
3232                 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3233                                       num_online_nodes() + target->comp_vector)
3234                                      % ibdev->num_comp_vectors;
3235                 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3236                                     num_online_nodes() + target->comp_vector)
3237                                    % ibdev->num_comp_vectors;
3238                 int cpu_idx = 0;
3239
3240                 for_each_online_cpu(cpu) {
3241                         if (cpu_to_node(cpu) != node)
3242                                 continue;
3243                         if (ch_start + cpu_idx >= ch_end)
3244                                 continue;
3245                         ch = &target->ch[ch_start + cpu_idx];
3246                         ch->target = target;
3247                         ch->comp_vector = cv_start == cv_end ? cv_start :
3248                                 cv_start + cpu_idx % (cv_end - cv_start);
3249                         spin_lock_init(&ch->lock);
3250                         INIT_LIST_HEAD(&ch->free_tx);
3251                         ret = srp_new_cm_id(ch);
3252                         if (ret)
3253                                 goto err_disconnect;
3254
3255                         ret = srp_create_ch_ib(ch);
3256                         if (ret)
3257                                 goto err_disconnect;
3258
3259                         ret = srp_alloc_req_data(ch);
3260                         if (ret)
3261                                 goto err_disconnect;
3262
3263                         ret = srp_connect_ch(ch, multich);
3264                         if (ret) {
3265                                 shost_printk(KERN_ERR, target->scsi_host,
3266                                              PFX "Connection %d/%d failed\n",
3267                                              ch_start + cpu_idx,
3268                                              target->ch_count);
3269                                 if (node_idx == 0 && cpu_idx == 0) {
3270                                         goto err_disconnect;
3271                                 } else {
3272                                         srp_free_ch_ib(target, ch);
3273                                         srp_free_req_data(target, ch);
3274                                         target->ch_count = ch - target->ch;
3275                                         goto connected;
3276                                 }
3277                         }
3278
3279                         multich = true;
3280                         cpu_idx++;
3281                 }
3282                 node_idx++;
3283         }
3284
3285 connected:
3286         target->scsi_host->nr_hw_queues = target->ch_count;
3287
3288         ret = srp_add_target(host, target);
3289         if (ret)
3290                 goto err_disconnect;
3291
3292         if (target->state != SRP_TARGET_REMOVED) {
3293                 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3294                              "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3295                              be64_to_cpu(target->id_ext),
3296                              be64_to_cpu(target->ioc_guid),
3297                              be16_to_cpu(target->pkey),
3298                              be64_to_cpu(target->service_id),
3299                              target->sgid.raw, target->orig_dgid.raw);
3300         }
3301
3302         ret = count;
3303
3304 out:
3305         mutex_unlock(&host->add_target_mutex);
3306
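        /*
         * The first put balances the scsi_host_get() call above; if target
         * creation failed, the second put also drops the initial reference
         * from scsi_host_alloc() so that the SCSI host gets freed.
         */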
3307         scsi_host_put(target->scsi_host);
3308         if (ret < 0)
3309                 scsi_host_put(target->scsi_host);
3310
3311         return ret;
3312
3313 err_disconnect:
3314         srp_disconnect_target(target);
3315
3316         for (i = 0; i < target->ch_count; i++) {
3317                 ch = &target->ch[i];
3318                 srp_free_ch_ib(target, ch);
3319                 srp_free_req_data(target, ch);
3320         }
3321
3322         kfree(target->ch);
3323         goto out;
3324 }
3325
3326 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3327
3328 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3329                           char *buf)
3330 {
3331         struct srp_host *host = container_of(dev, struct srp_host, dev);
3332
3333         return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3334 }
3335
3336 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3337
3338 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3339                          char *buf)
3340 {
3341         struct srp_host *host = container_of(dev, struct srp_host, dev);
3342
3343         return sprintf(buf, "%d\n", host->port);
3344 }
3345
3346 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3347
3348 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3349 {
3350         struct srp_host *host;
3351
3352         host = kzalloc(sizeof *host, GFP_KERNEL);
3353         if (!host)
3354                 return NULL;
3355
3356         INIT_LIST_HEAD(&host->target_list);
3357         spin_lock_init(&host->target_lock);
3358         init_completion(&host->released);
3359         mutex_init(&host->add_target_mutex);
3360         host->srp_dev = device;
3361         host->port = port;
3362
3363         host->dev.class = &srp_class;
3364         host->dev.parent = device->dev->dma_device;
3365         dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3366
3367         if (device_register(&host->dev))
3368                 goto free_host;
3369         if (device_create_file(&host->dev, &dev_attr_add_target))
3370                 goto err_class;
3371         if (device_create_file(&host->dev, &dev_attr_ibdev))
3372                 goto err_class;
3373         if (device_create_file(&host->dev, &dev_attr_port))
3374                 goto err_class;
3375
3376         return host;
3377
3378 err_class:
3379         device_unregister(&host->dev);
3380
3381 free_host:
3382         kfree(host);
3383
3384         return NULL;
3385 }
3386
3387 static void srp_add_one(struct ib_device *device)
3388 {
3389         struct srp_device *srp_dev;
3390         struct ib_device_attr *dev_attr;
3391         struct srp_host *host;
3392         int mr_page_shift, p;
3393         u64 max_pages_per_mr;
3394
3395         dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3396         if (!dev_attr)
3397                 return;
3398
3399         if (ib_query_device(device, dev_attr)) {
3400                 pr_warn("Query device failed for %s\n", device->name);
3401                 goto free_attr;
3402         }
3403
3404         srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3405         if (!srp_dev)
3406                 goto free_attr;
3407
3408         srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3409                             device->map_phys_fmr && device->unmap_fmr);
3410         srp_dev->has_fr = (dev_attr->device_cap_flags &
3411                            IB_DEVICE_MEM_MGT_EXTENSIONS);
3412         if (!srp_dev->has_fmr && !srp_dev->has_fr)
3413                 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3414
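        /*
         * Use fast registration if FMR is not supported or if the prefer_fr
         * module parameter has been set; otherwise fall back to FMR.
         */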
3415         srp_dev->use_fast_reg = (srp_dev->has_fr &&
3416                                  (!srp_dev->has_fmr || prefer_fr));
3417         srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3418
3419         /*
3420          * Use the smallest page size supported by the HCA, down to a
3421          * minimum of 4096 bytes. We're unlikely to build large sglists
3422          * out of smaller entries.
3423          */
3424         mr_page_shift           = max(12, ffs(dev_attr->page_size_cap) - 1);
3425         srp_dev->mr_page_size   = 1 << mr_page_shift;
3426         srp_dev->mr_page_mask   = ~((u64) srp_dev->mr_page_size - 1);
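        /*
         * Compute how many mr_page_size pages fit in the largest memory
         * region the device supports; do_div() is used because max_mr_size
         * is a 64-bit quantity.
         */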
3427         max_pages_per_mr        = dev_attr->max_mr_size;
3428         do_div(max_pages_per_mr, srp_dev->mr_page_size);
3429         srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3430                                           max_pages_per_mr);
3431         if (srp_dev->use_fast_reg) {
3432                 srp_dev->max_pages_per_mr =
3433                         min_t(u32, srp_dev->max_pages_per_mr,
3434                               dev_attr->max_fast_reg_page_list_len);
3435         }
3436         srp_dev->mr_max_size    = srp_dev->mr_page_size *
3437                                    srp_dev->max_pages_per_mr;
3438         pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3439                  device->name, mr_page_shift, dev_attr->max_mr_size,
3440                  dev_attr->max_fast_reg_page_list_len,
3441                  srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3442
3443         INIT_LIST_HEAD(&srp_dev->dev_list);
3444
3445         srp_dev->dev = device;
3446         srp_dev->pd  = ib_alloc_pd(device);
3447         if (IS_ERR(srp_dev->pd))
3448                 goto free_dev;
3449
3450         srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3451                                     IB_ACCESS_LOCAL_WRITE |
3452                                     IB_ACCESS_REMOTE_READ |
3453                                     IB_ACCESS_REMOTE_WRITE);
3454         if (IS_ERR(srp_dev->mr))
3455                 goto err_pd;
3456
3457         for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3458                 host = srp_add_port(srp_dev, p);
3459                 if (host)
3460                         list_add_tail(&host->list, &srp_dev->dev_list);
3461         }
3462
3463         ib_set_client_data(device, &srp_client, srp_dev);
3464
3465         goto free_attr;
3466
3467 err_pd:
3468         ib_dealloc_pd(srp_dev->pd);
3469
3470 free_dev:
3471         kfree(srp_dev);
3472
3473 free_attr:
3474         kfree(dev_attr);
3475 }
3476
3477 static void srp_remove_one(struct ib_device *device, void *client_data)
3478 {
3479         struct srp_device *srp_dev;
3480         struct srp_host *host, *tmp_host;
3481         struct srp_target_port *target;
3482
3483         srp_dev = client_data;
3484         if (!srp_dev)
3485                 return;
3486
3487         list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3488                 device_unregister(&host->dev);
3489                 /*
3490                  * Wait for the sysfs entry to go away, so that no new
3491                  * target ports can be created.
3492                  */
3493                 wait_for_completion(&host->released);
3494
3495                 /*
3496                  * Remove all target ports.
3497                  */
3498                 spin_lock(&host->target_lock);
3499                 list_for_each_entry(target, &host->target_list, list)
3500                         srp_queue_remove_work(target);
3501                 spin_unlock(&host->target_lock);
3502
3503                 /*
3504                  * Wait for tl_err and target port removal tasks.
3505                  */
3506                 flush_workqueue(system_long_wq);
3507                 flush_workqueue(srp_remove_wq);
3508
3509                 kfree(host);
3510         }
3511
3512         ib_dereg_mr(srp_dev->mr);
3513         ib_dealloc_pd(srp_dev->pd);
3514
3515         kfree(srp_dev);
3516 }
3517
3518 static struct srp_function_template ib_srp_transport_functions = {
3519         .has_rport_state         = true,
3520         .reset_timer_if_blocked  = true,
3521         .reconnect_delay         = &srp_reconnect_delay,
3522         .fast_io_fail_tmo        = &srp_fast_io_fail_tmo,
3523         .dev_loss_tmo            = &srp_dev_loss_tmo,
3524         .reconnect               = srp_rport_reconnect,
3525         .rport_delete            = srp_rport_delete,
3526         .terminate_rport_io      = srp_terminate_io,
3527 };
3528
3529 static int __init srp_init_module(void)
3530 {
3531         int ret;
3532
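        /* wr_id carries pointers in this driver, so it must be at least pointer-sized. */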
3533         BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3534
3535         if (srp_sg_tablesize) {
3536                 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3537                 if (!cmd_sg_entries)
3538                         cmd_sg_entries = srp_sg_tablesize;
3539         }
3540
3541         if (!cmd_sg_entries)
3542                 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3543
3544         if (cmd_sg_entries > 255) {
3545                 pr_warn("Clamping cmd_sg_entries to 255\n");
3546                 cmd_sg_entries = 255;
3547         }
3548
3549         if (!indirect_sg_entries)
3550                 indirect_sg_entries = cmd_sg_entries;
3551         else if (indirect_sg_entries < cmd_sg_entries) {
3552                 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3553                         cmd_sg_entries);
3554                 indirect_sg_entries = cmd_sg_entries;
3555         }
3556
3557         srp_remove_wq = create_workqueue("srp_remove");
3558         if (!srp_remove_wq) {
3559                 ret = -ENOMEM;
3560                 goto out;
3561         }
3562
3563         ret = -ENOMEM;
3564         ib_srp_transport_template =
3565                 srp_attach_transport(&ib_srp_transport_functions);
3566         if (!ib_srp_transport_template)
3567                 goto destroy_wq;
3568
3569         ret = class_register(&srp_class);
3570         if (ret) {
3571                 pr_err("couldn't register class infiniband_srp\n");
3572                 goto release_tr;
3573         }
3574
3575         ib_sa_register_client(&srp_sa_client);
3576
3577         ret = ib_register_client(&srp_client);
3578         if (ret) {
3579                 pr_err("couldn't register IB client\n");
3580                 goto unreg_sa;
3581         }
3582
3583 out:
3584         return ret;
3585
3586 unreg_sa:
3587         ib_sa_unregister_client(&srp_sa_client);
3588         class_unregister(&srp_class);
3589
3590 release_tr:
3591         srp_release_transport(ib_srp_transport_template);
3592
3593 destroy_wq:
3594         destroy_workqueue(srp_remove_wq);
3595         goto out;
3596 }
3597
3598 static void __exit srp_cleanup_module(void)
3599 {
3600         ib_unregister_client(&srp_client);
3601         ib_sa_unregister_client(&srp_sa_client);
3602         class_unregister(&srp_class);
3603         srp_release_transport(ib_srp_transport_template);
3604         destroy_workqueue(srp_remove_wq);
3605 }
3606
3607 module_init(srp_init_module);
3608 module_exit(srp_cleanup_module);