/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <asm/bitops.h>
#include <linux/module.h> /* try_module_get()/module_put() */

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/*
 * internal functions
 */

/*
 * Replies are handled in tasklet context, using a single global list.
 * The tasklet function below simply walks that list and invokes the
 * reply handler for each queued reply.
 */

static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);

static void
rpcrdma_run_tasklet(unsigned long data)
{
        struct rpcrdma_rep *rep;
        unsigned long flags;

        data = data;    /* the tasklet argument is unused */
        spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
        while (!list_empty(&rpcrdma_tasklets_g)) {
                rep = list_entry(rpcrdma_tasklets_g.next,
                                 struct rpcrdma_rep, rr_list);
                list_del(&rep->rr_list);
                spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

                rpcrdma_reply_handler(rep);

                spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
        }
        spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

static void
rpcrdma_schedule_tasklet(struct list_head *sched_list)
{
        unsigned long flags;

        spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
        list_splice_tail(sched_list, &rpcrdma_tasklets_g);
        spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
        tasklet_schedule(&rpcrdma_tasklet_g);
}
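
/* Not part of the original flow, but a sketch of how a reply travels
 * from provider completion context to the soft-IRQ reply handler
 * above; this is why rpcrdma_tk_lock_g must be taken with irqsave:
 *
 *	rpcrdma_recvcq_upcall()			(provider completion
 *	  rpcrdma_recvcq_poll()			 context, often hard IRQ)
 *	    rpcrdma_recvcq_process_wc()		rep queued on sched_list
 *	  rpcrdma_schedule_tasklet()		sched_list spliced onto
 *						rpcrdma_tasklets_g
 *	rpcrdma_run_tasklet()			(soft IRQ)
 *	  rpcrdma_reply_handler(rep)		called with the lock dropped
 */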

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;

        pr_err("RPC:       %s: %s on device %s ep %p\n",
               __func__, ib_event_msg(event->event),
               event->device->name, context);
        if (ep->rep_connected == 1) {
                ep->rep_connected = -EIO;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
        }
}

static void
rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;

        pr_err("RPC:       %s: %s on device %s ep %p\n",
               __func__, ib_event_msg(event->event),
               event->device->name, context);
        if (ep->rep_connected == 1) {
                ep->rep_connected = -EIO;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
        }
}

static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
        /* WARNING: Only wr_id and status are reliable at this point */
        if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
                if (wc->status != IB_WC_SUCCESS &&
                    wc->status != IB_WC_WR_FLUSH_ERR)
                        pr_err("RPC:       %s: SEND: %s\n",
                               __func__, ib_wc_status_msg(wc->status));
        } else {
                struct rpcrdma_mw *r;

                r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
                r->mw_sendcompletion(wc);
        }
}

static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
        struct ib_wc *wcs;
        int budget, count, rc;

        budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
        do {
                wcs = ep->rep_send_wcs;

                rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
                if (rc <= 0)
                        return rc;

                count = rc;
                while (count-- > 0)
                        rpcrdma_sendcq_process_wc(wcs++);
        } while (rc == RPCRDMA_POLLSIZE && --budget);
        return 0;
}
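
/* A worked example of the polling budget above. Assuming, purely for
 * illustration, that RPCRDMA_WC_BUDGET is 1024 and RPCRDMA_POLLSIZE
 * is 16 (see xprt_rdma.h for the real values):
 *
 *	budget = 1024 / 16 = 64 polling passes
 *	each pass drains at most 16 WCs from the CQ
 *	=> at most ~1024 send completions handled per upcall
 *
 * The loop also exits early whenever ib_poll_cq() returns fewer than
 * RPCRDMA_POLLSIZE entries, meaning the CQ has been emptied.
 */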

/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
        struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
        int rc;

        rc = rpcrdma_sendcq_poll(cq, ep);
        if (rc) {
                dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
                        __func__, rc);
                return;
        }

        rc = ib_req_notify_cq(cq,
                        IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        if (rc == 0)
                return;
        if (rc < 0) {
                dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
                        __func__, rc);
                return;
        }

        rpcrdma_sendcq_poll(cq, ep);
}

static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
{
        struct rpcrdma_rep *rep =
                        (struct rpcrdma_rep *)(unsigned long)wc->wr_id;

        /* WARNING: Only wr_id and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS)
                goto out_fail;

        /* status == SUCCESS means all fields in wc are trustworthy */
        if (wc->opcode != IB_WC_RECV)
                return;

        dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
                __func__, rep, wc->byte_len);

        rep->rr_len = wc->byte_len;
        ib_dma_sync_single_for_cpu(rep->rr_device,
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rep->rr_len, DMA_FROM_DEVICE);
        prefetch(rdmab_to_msg(rep->rr_rdmabuf));

out_schedule:
        list_add_tail(&rep->rr_list, sched_list);
        return;
out_fail:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("RPC:       %s: rep %p: %s\n",
                       __func__, rep, ib_wc_status_msg(wc->status));
        rep->rr_len = ~0U;
        goto out_schedule;
}
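
/* How the rep pointer round-trips through the hardware, for reference:
 * rpcrdma_ep_post_recv() (later in this file) stashes it in the
 * 64-bit wr_id of the posted RECV, and the completion path above
 * recovers it:
 *
 *	recv_wr.wr_id = (u64)(unsigned long)rep;		post
 *	rep = (struct rpcrdma_rep *)(unsigned long)wc->wr_id;	completion
 *
 * Only wr_id and status are valid in a flushed WC, which is why the
 * out_fail path touches nothing else in the wc before scheduling.
 */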

static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
        struct list_head sched_list;
        struct ib_wc *wcs;
        int budget, count, rc;

        INIT_LIST_HEAD(&sched_list);
        budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
        do {
                wcs = ep->rep_recv_wcs;

                rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
                if (rc <= 0)
                        goto out_schedule;

                count = rc;
                while (count-- > 0)
                        rpcrdma_recvcq_process_wc(wcs++, &sched_list);
        } while (rc == RPCRDMA_POLLSIZE && --budget);
        rc = 0;

out_schedule:
        rpcrdma_schedule_tasklet(&sched_list);
        return rc;
}

/*
 * Handle receive completions.
 *
 * It is reentrant but processes events one at a time to preserve
 * the ordering of receives, which keeps server credit accounting
 * accurate.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
        struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
        int rc;

        rc = rpcrdma_recvcq_poll(cq, ep);
        if (rc) {
                dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
                        __func__, rc);
                return;
        }

        rc = ib_req_notify_cq(cq,
                        IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        if (rc == 0)
                return;
        if (rc < 0) {
                dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
                        __func__, rc);
                return;
        }

        rpcrdma_recvcq_poll(cq, ep);
}

static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
        struct ib_wc wc;
        LIST_HEAD(sched_list);

        while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
                rpcrdma_recvcq_process_wc(&wc, &sched_list);
        if (!list_empty(&sched_list))
                rpcrdma_schedule_tasklet(&sched_list);
        while (ib_poll_cq(ep->rep_attr.send_cq, 1, &wc) > 0)
                rpcrdma_sendcq_process_wc(&wc);
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rpcrdma_xprt *xprt = id->context;
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
        struct ib_qp_attr *attr = &ia->ri_qp_attr;
        struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
        int connstate = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ia->ri_async_rc = 0;
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ia->ri_async_rc = -EHOSTUNREACH;
                dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
                        __func__, ep);
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ia->ri_async_rc = -ENETUNREACH;
                dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
                        __func__, ep);
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                connstate = 1;
                ib_query_qp(ia->ri_id->qp, attr,
                            IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
                            iattr);
                dprintk("RPC:       %s: %d responder resources"
                        " (%d initiator)\n",
                        __func__, attr->max_dest_rd_atomic,
                        attr->max_rd_atomic);
                goto connected;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                connstate = -ENOTCONN;
                goto connected;
        case RDMA_CM_EVENT_UNREACHABLE:
                connstate = -ENETDOWN;
                goto connected;
        case RDMA_CM_EVENT_REJECTED:
                connstate = -ECONNREFUSED;
                goto connected;
        case RDMA_CM_EVENT_DISCONNECTED:
                connstate = -ECONNABORTED;
                goto connected;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                connstate = -ENODEV;
connected:
                dprintk("RPC:       %s: %sconnected\n",
                        __func__, connstate > 0 ? "" : "dis");
                ep->rep_connected = connstate;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
                /*FALLTHROUGH*/
        default:
                dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
                        __func__, sap, rpc_get_port(sap), ep,
                        rdma_event_msg(event->event));
                break;
        }

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        if (connstate == 1) {
                int ird = attr->max_dest_rd_atomic;
                int tird = ep->rep_remote_cma.responder_resources;

                pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
                        sap, rpc_get_port(sap),
                        ia->ri_device->name,
                        ia->ri_ops->ro_displayname,
                        xprt->rx_buf.rb_max_requests,
                        ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
        } else if (connstate < 0) {
                pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
                        sap, rpc_get_port(sap), connstate);
        }
#endif

        return 0;
}
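
/* Summary of the ep->rep_connected values driven by the CM event
 * handler above (derived from the switch cases):
 *
 *	 1		connected (RDMA_CM_EVENT_ESTABLISHED)
 *	 0		idle, or connection attempt in progress
 *	-ENOTCONN	connect error
 *	-ENETDOWN	peer unreachable
 *	-ECONNREFUSED	rejected; possibly no listener, so retried
 *	-ECONNABORTED	disconnected
 *	-ENODEV		device removal
 *	-EIO		QP or CQ async error (see the upcalls above)
 *
 * rpcrdma_ep_connect() sleeps on rep_connect_wait until this field
 * becomes non-zero.
 */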

static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
        if (id) {
                module_put(id->device->owner);
                rdma_destroy_id(id);
        }
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
                        struct rpcrdma_ia *ia, struct sockaddr *addr)
{
        struct rdma_cm_id *id;
        int rc;

        init_completion(&ia->ri_done);

        id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
                            IB_QPT_RC);
        if (IS_ERR(id)) {
                rc = PTR_ERR(id);
                dprintk("RPC:       %s: rdma_create_id() failed %i\n",
                        __func__, rc);
                return id;
        }

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
                        __func__, rc);
                goto out;
        }
        wait_for_completion_interruptible_timeout(&ia->ri_done,
                                msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

        /* FIXME:
         * Until xprtrdma supports DEVICE_REMOVAL, the provider must
         * be pinned while there are active NFS/RDMA mounts to prevent
         * hangs and crashes at umount time.
         */
        if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
                dprintk("RPC:       %s: Failed to get device module\n",
                        __func__);
                ia->ri_async_rc = -ENODEV;
        }
        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
                        __func__, rc);
                goto put;
        }
        wait_for_completion_interruptible_timeout(&ia->ri_done,
                                msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
        rc = ia->ri_async_rc;
        if (rc)
                goto put;

        return id;
put:
        module_put(id->device->owner);
out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}
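
/* Each resolution step above is an asynchronous handshake with the
 * CM: arm ri_async_rc with -ETIMEDOUT, kick the CM, then wait for
 * rpcrdma_conn_upcall() to overwrite the rc and complete ri_done.
 * In skeleton form:
 *
 *	ia->ri_async_rc = -ETIMEDOUT;
 *	rdma_resolve_addr(...);		(or rdma_resolve_route())
 *	wait_for_completion_interruptible_timeout(&ia->ri_done, ...);
 *	rc = ia->ri_async_rc;		0, -EHOSTUNREACH, -ENETUNREACH,
 *					or -ETIMEDOUT if no upcall arrived
 */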

/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
        struct ib_wc wc;
        int count = 0;

        while (1 == ib_poll_cq(cq, 1, &wc))
                ++count;

        if (count)
                dprintk("RPC:       %s: flushed %d events (last 0x%x)\n",
                        __func__, count, wc.opcode);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection domain.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        struct ib_device_attr *devattr = &ia->ri_devattr;
        int rc;

        ia->ri_dma_mr = NULL;

        ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
        if (IS_ERR(ia->ri_id)) {
                rc = PTR_ERR(ia->ri_id);
                goto out1;
        }
        ia->ri_device = ia->ri_id->device;

        ia->ri_pd = ib_alloc_pd(ia->ri_device);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
                        __func__, rc);
                goto out2;
        }

        rc = ib_query_device(ia->ri_device, devattr);
        if (rc) {
                dprintk("RPC:       %s: ib_query_device failed %d\n",
                        __func__, rc);
                goto out3;
        }

        if (memreg == RPCRDMA_FRMR) {
                if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
                    (devattr->max_fast_reg_page_list_len == 0)) {
                        dprintk("RPC:       %s: FRMR registration "
                                "not supported by HCA\n", __func__);
                        memreg = RPCRDMA_MTHCAFMR;
                }
        }
        if (memreg == RPCRDMA_MTHCAFMR) {
                if (!ia->ri_device->alloc_fmr) {
                        dprintk("RPC:       %s: MTHCAFMR registration "
                                "not supported by HCA\n", __func__);
                        rc = -EINVAL;
                        goto out3;
                }
        }

        switch (memreg) {
        case RPCRDMA_FRMR:
                ia->ri_ops = &rpcrdma_frwr_memreg_ops;
                break;
        case RPCRDMA_ALLPHYSICAL:
                ia->ri_ops = &rpcrdma_physical_memreg_ops;
                break;
        case RPCRDMA_MTHCAFMR:
                ia->ri_ops = &rpcrdma_fmr_memreg_ops;
                break;
        default:
                printk(KERN_ERR "RPC: Unsupported memory "
                                "registration mode: %d\n", memreg);
                rc = -ENOMEM;
                goto out3;
        }
        dprintk("RPC:       %s: memory registration strategy is '%s'\n",
                __func__, ia->ri_ops->ro_displayname);

        rwlock_init(&ia->ri_qplock);
        return 0;

out3:
        ib_dealloc_pd(ia->ri_pd);
        ia->ri_pd = NULL;
out2:
        rpcrdma_destroy_id(ia->ri_id);
        ia->ri_id = NULL;
out1:
        return rc;
}
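
/* The registration mode selection above degrades gracefully; a rough
 * decision table (FRMR is the preferred mode):
 *
 *	requested		HCA support			result
 *	---------		-----------			------
 *	RPCRDMA_FRMR		MEM_MGT_EXTENSIONS		frwr ops
 *	RPCRDMA_FRMR		no MEM_MGT_EXTENSIONS		fall back to FMR
 *	RPCRDMA_MTHCAFMR	device has ->alloc_fmr		fmr ops
 *	RPCRDMA_MTHCAFMR	no ->alloc_fmr			-EINVAL
 *	RPCRDMA_ALLPHYSICAL	(always available)		physical ops
 */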

/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 *   o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
        dprintk("RPC:       %s: entering\n", __func__);
        if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
                if (ia->ri_id->qp)
                        rdma_destroy_qp(ia->ri_id);
                rpcrdma_destroy_id(ia->ri_id);
                ia->ri_id = NULL;
        }

        /* If the pd is still busy, xprtrdma missed freeing a resource */
        if (ia->ri_pd && !IS_ERR(ia->ri_pd))
                ib_dealloc_pd(ia->ri_pd);
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
                                struct rpcrdma_create_data_internal *cdata)
{
        struct ib_device_attr *devattr = &ia->ri_devattr;
        struct ib_cq *sendcq, *recvcq;
        struct ib_cq_init_attr cq_attr = {};
        int rc, err;

        if (devattr->max_sge < RPCRDMA_MAX_IOVS) {
                dprintk("RPC:       %s: insufficient SGEs available\n",
                        __func__);
                return -ENOMEM;
        }

        /* check provider's send/recv wr limits */
        if (cdata->max_requests > devattr->max_qp_wr)
                cdata->max_requests = devattr->max_qp_wr;

        ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
        ep->rep_attr.qp_context = ep;
        ep->rep_attr.srq = NULL;
        ep->rep_attr.cap.max_send_wr = cdata->max_requests;
        rc = ia->ri_ops->ro_open(ia, ep, cdata);
        if (rc)
                return rc;
        ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
        ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
        ep->rep_attr.cap.max_recv_sge = 1;
        ep->rep_attr.cap.max_inline_data = 0;
        ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->rep_attr.qp_type = IB_QPT_RC;
        ep->rep_attr.port_num = ~0;

        dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->rep_attr.cap.max_send_wr,
                ep->rep_attr.cap.max_recv_wr,
                ep->rep_attr.cap.max_send_sge,
                ep->rep_attr.cap.max_recv_sge);

        /* set trigger for requesting send completion */
        ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
        if (ep->rep_cqinit > RPCRDMA_MAX_UNSIGNALED_SENDS)
                ep->rep_cqinit = RPCRDMA_MAX_UNSIGNALED_SENDS;
        else if (ep->rep_cqinit <= 2)
                ep->rep_cqinit = 0;
        INIT_CQCOUNT(ep);
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

        cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
        sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall,
                              rpcrdma_cq_async_error_upcall, ep, &cq_attr);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                dprintk("RPC:       %s: failed to create send CQ: %i\n",
                        __func__, rc);
                goto out1;
        }

        rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
        if (rc) {
                dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
                        __func__, rc);
                goto out2;
        }

        cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
        recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall,
                              rpcrdma_cq_async_error_upcall, ep, &cq_attr);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                dprintk("RPC:       %s: failed to create recv CQ: %i\n",
                        __func__, rc);
                goto out2;
        }

        rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
        if (rc) {
                dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
                        __func__, rc);
                ib_destroy_cq(recvcq);
                goto out2;
        }

        ep->rep_attr.send_cq = sendcq;
        ep->rep_attr.recv_cq = recvcq;

        /* Initialize cma parameters */

        /* RPC/RDMA does not use private data */
        ep->rep_remote_cma.private_data = NULL;
        ep->rep_remote_cma.private_data_len = 0;

        /* Client offers RDMA Read but does not initiate */
        ep->rep_remote_cma.initiator_depth = 0;
        if (devattr->max_qp_rd_atom > 32)       /* arbitrary but <= 255 */
                ep->rep_remote_cma.responder_resources = 32;
        else
                ep->rep_remote_cma.responder_resources =
                                                devattr->max_qp_rd_atom;

        ep->rep_remote_cma.retry_count = 7;
        ep->rep_remote_cma.flow_control = 0;
        ep->rep_remote_cma.rnr_retry_count = 0;

        return 0;

out2:
        err = ib_destroy_cq(sendcq);
        if (err)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
                        __func__, err);
out1:
        if (ia->ri_dma_mr)
                ib_dereg_mr(ia->ri_dma_mr);
        return rc;
}
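
/* A worked example of the send-completion trigger computed above,
 * using an illustrative max_send_wr of 128 and assuming
 * RPCRDMA_MAX_UNSIGNALED_SENDS is 32 (check xprt_rdma.h for the
 * real cap):
 *
 *	rep_cqinit = 128 / 2 - 1 = 63, capped to 32
 *
 * rpcrdma_ep_post() then signals roughly one send WR in every 32,
 * which keeps the send CQ from wrapping without taking an interrupt
 * per send. Very small queues (rep_cqinit <= 2) signal every send.
 */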

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc;

        dprintk("RPC:       %s: entering, connected is %d\n",
                __func__, ep->rep_connected);

        cancel_delayed_work_sync(&ep->rep_connect_worker);

        if (ia->ri_id->qp)
                rpcrdma_ep_disconnect(ep, ia);

        rpcrdma_clean_cq(ep->rep_attr.recv_cq);
        rpcrdma_clean_cq(ep->rep_attr.send_cq);

        if (ia->ri_id->qp) {
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }

        rc = ib_destroy_cq(ep->rep_attr.recv_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
                        __func__, rc);

        rc = ib_destroy_cq(ep->rep_attr.send_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
                        __func__, rc);

        if (ia->ri_dma_mr) {
                rc = ib_dereg_mr(ia->ri_dma_mr);
                dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
                        __func__, rc);
        }
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        struct rdma_cm_id *id, *old;
        int rc = 0;
        int retry_count = 0;

        if (ep->rep_connected != 0) {
                struct rpcrdma_xprt *xprt;
retry:
                dprintk("RPC:       %s: reconnecting...\n", __func__);

                rpcrdma_ep_disconnect(ep, ia);
                rpcrdma_flush_cqs(ep);

                xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
                id = rpcrdma_create_id(xprt, ia,
                                (struct sockaddr *)&xprt->rx_data.addr);
                if (IS_ERR(id)) {
                        rc = -EHOSTUNREACH;
                        goto out;
                }
                /* TEMP TEMP TEMP - fail if new device:
                 * Deregister/remarshal *all* requests!
                 * Close and recreate adapter, pd, etc!
                 * Re-determine all attributes still sane!
                 * More stuff I haven't thought of!
                 * Rrrgh!
                 */
                if (ia->ri_device != id->device) {
                        printk("RPC:       %s: can't reconnect on "
                                "different device!\n", __func__);
                        rpcrdma_destroy_id(id);
                        rc = -ENETUNREACH;
                        goto out;
                }
                /* END TEMP */
                rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
                if (rc) {
                        dprintk("RPC:       %s: rdma_create_qp failed %i\n",
                                __func__, rc);
                        rpcrdma_destroy_id(id);
                        rc = -ENETUNREACH;
                        goto out;
                }

                write_lock(&ia->ri_qplock);
                old = ia->ri_id;
                ia->ri_id = id;
                write_unlock(&ia->ri_qplock);

                rdma_destroy_qp(old);
                rpcrdma_destroy_id(old);
        } else {
                dprintk("RPC:       %s: connecting...\n", __func__);
                rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
                if (rc) {
                        dprintk("RPC:       %s: rdma_create_qp failed %i\n",
                                __func__, rc);
                        /* do not update ep->rep_connected */
                        return -ENETUNREACH;
                }
        }

        ep->rep_connected = 0;

        rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
        if (rc) {
                dprintk("RPC:       %s: rdma_connect() failed with %i\n",
                                __func__, rc);
                goto out;
        }

        wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);

        /*
         * Check the connection state. A non-peer reject indicates no
         * listener (ECONNREFUSED), which may be a transient state and
         * is therefore worth retrying. All other states indicate a
         * transport condition that has already been handled on a
         * best-effort basis.
         */
        if (ep->rep_connected == -ECONNREFUSED &&
            ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
                dprintk("RPC:       %s: non-peer_reject, retry\n", __func__);
                goto retry;
        }
        if (ep->rep_connected <= 0) {
                /* Sometimes, the only way to reliably connect to remote
                 * CMs is to use the same nonzero values for ORD and IRD. */
                if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
                    (ep->rep_remote_cma.responder_resources == 0 ||
                     ep->rep_remote_cma.initiator_depth !=
                                ep->rep_remote_cma.responder_resources)) {
                        if (ep->rep_remote_cma.responder_resources == 0)
                                ep->rep_remote_cma.responder_resources = 1;
                        ep->rep_remote_cma.initiator_depth =
                                ep->rep_remote_cma.responder_resources;
                        goto retry;
                }
                rc = ep->rep_connected;
        } else {
                dprintk("RPC:       %s: connected\n", __func__);
        }

out:
        if (rc)
                ep->rep_connected = rc;
        return rc;
}
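
/* The ORD/IRD fixup in the retry path above, in isolation: some peer
 * CMs accept a connection only when the initiator depth matches a
 * non-zero responder_resources, so the retry mutates the conn_param
 * before looping back:
 *
 *	if (ep->rep_remote_cma.responder_resources == 0)
 *		ep->rep_remote_cma.responder_resources = 1;
 *	ep->rep_remote_cma.initiator_depth =
 *		ep->rep_remote_cma.responder_resources;
 *	goto retry;
 */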

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc;

        rpcrdma_flush_cqs(ep);
        rc = rdma_disconnect(ia->ri_id);
        if (!rc) {
                /* returns without wait if not connected */
                wait_event_interruptible(ep->rep_connect_wait,
                                         ep->rep_connected != 1);
                dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
                        (ep->rep_connected == 1) ? "still " : "dis");
        } else {
                dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
                ep->rep_connected = rc;
        }
}

static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_req *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        req->rl_buffer = &r_xprt->rx_buf;
        return req;
}

static struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_rep *rep;
        int rc;

        rc = -ENOMEM;
        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (rep == NULL)
                goto out;

        rep->rr_rdmabuf = rpcrdma_alloc_regbuf(ia, cdata->inline_rsize,
                                               GFP_KERNEL);
        if (IS_ERR(rep->rr_rdmabuf)) {
                rc = PTR_ERR(rep->rr_rdmabuf);
                goto out_free;
        }

        rep->rr_device = ia->ri_device;
        rep->rr_rxprt = r_xprt;
        return rep;

out_free:
        kfree(rep);
out:
        return ERR_PTR(rc);
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        char *p;
        size_t len;
        int i, rc;

        buf->rb_max_requests = cdata->max_requests;
        spin_lock_init(&buf->rb_lock);

        /* Need to allocate:
         *   1.  arrays for send and recv pointers
         *   2.  arrays of struct rpcrdma_req to fill in pointers
         *   3.  array of struct rpcrdma_rep for replies
         * Send/recv buffers in req/rep need to be registered
         */
        len = buf->rb_max_requests *
                (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));

        p = kzalloc(len, GFP_KERNEL);
        if (p == NULL) {
                dprintk("RPC:       %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
                        __func__, len);
                rc = -ENOMEM;
                goto out;
        }
        buf->rb_pool = p;       /* for freeing it later */

        buf->rb_send_bufs = (struct rpcrdma_req **) p;
        p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
        buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
        p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];

        rc = ia->ri_ops->ro_init(r_xprt);
        if (rc)
                goto out;

        for (i = 0; i < buf->rb_max_requests; i++) {
                struct rpcrdma_req *req;
                struct rpcrdma_rep *rep;

                req = rpcrdma_create_req(r_xprt);
                if (IS_ERR(req)) {
                        dprintk("RPC:       %s: request buffer %d alloc"
                                " failed\n", __func__, i);
                        rc = PTR_ERR(req);
                        goto out;
                }
                buf->rb_send_bufs[i] = req;

                rep = rpcrdma_create_rep(r_xprt);
                if (IS_ERR(rep)) {
                        dprintk("RPC:       %s: reply buffer %d alloc failed\n",
                                __func__, i);
                        rc = PTR_ERR(rep);
                        goto out;
                }
                buf->rb_recv_bufs[i] = rep;
        }

        return 0;
out:
        rpcrdma_buffer_destroy(buf);
        return rc;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_ia *ia, struct rpcrdma_rep *rep)
{
        if (!rep)
                return;

        rpcrdma_free_regbuf(ia, rep->rr_rdmabuf);
        kfree(rep);
}

static void
rpcrdma_destroy_req(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
        if (!req)
                return;

        rpcrdma_free_regbuf(ia, req->rl_sendbuf);
        rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
        kfree(req);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_ia *ia = rdmab_to_ia(buf);
        int i;

        /* clean up in reverse order from create
         *   1.  recv mr memory (mr free, then kfree)
         *   2.  send mr memory (mr free, then kfree)
         *   3.  MWs
         */
        dprintk("RPC:       %s: entering\n", __func__);

        for (i = 0; i < buf->rb_max_requests; i++) {
                if (buf->rb_recv_bufs)
                        rpcrdma_destroy_rep(ia, buf->rb_recv_bufs[i]);
                if (buf->rb_send_bufs)
                        rpcrdma_destroy_req(ia, buf->rb_send_bufs[i]);
        }

        ia->ri_ops->ro_destroy(buf);

        kfree(buf->rb_pool);
}

struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mw *mw = NULL;

        spin_lock(&buf->rb_mwlock);
        if (!list_empty(&buf->rb_mws)) {
                mw = list_first_entry(&buf->rb_mws,
                                      struct rpcrdma_mw, mw_list);
                list_del_init(&mw->mw_list);
        }
        spin_unlock(&buf->rb_mwlock);

        if (!mw)
                pr_err("RPC:       %s: no MWs available\n", __func__);
        return mw;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

        spin_lock(&buf->rb_mwlock);
        list_add_tail(&mw->mw_list, &buf->rb_mws);
        spin_unlock(&buf->rb_mwlock);
}

static void
rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
        buf->rb_send_bufs[--buf->rb_send_index] = req;
        req->rl_niovs = 0;
        if (req->rl_reply) {
                buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
                req->rl_reply = NULL;
        }
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 * Rule:
 *    rb_send_index and rb_recv_index MUST always be pointing to the
 *    *next* available buffer (non-NULL). They are incremented after
 *    removing buffers, and decremented *before* returning them.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
        struct rpcrdma_req *req;
        unsigned long flags;

        spin_lock_irqsave(&buffers->rb_lock, flags);

        if (buffers->rb_send_index == buffers->rb_max_requests) {
                spin_unlock_irqrestore(&buffers->rb_lock, flags);
                dprintk("RPC:       %s: out of request buffers\n", __func__);
                return NULL;
        }

        req = buffers->rb_send_bufs[buffers->rb_send_index];
        if (buffers->rb_send_index < buffers->rb_recv_index) {
                dprintk("RPC:       %s: %d extra receives outstanding (ok)\n",
                        __func__,
                        buffers->rb_recv_index - buffers->rb_send_index);
                req->rl_reply = NULL;
        } else {
                req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
                buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
        }
        buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;

        spin_unlock_irqrestore(&buffers->rb_lock, flags);
        return req;
}
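
/* A concrete walk-through of the index rule documented above,
 * assuming rb_max_requests is 4 and both indexes start at zero with
 * all slots occupied:
 *
 *	get:  req = send_bufs[0]; send_bufs[0] = NULL; send_index -> 1
 *	      rep = recv_bufs[0]; recv_bufs[0] = NULL; recv_index -> 1
 *	put:  send_bufs[--send_index] = req;	(decrement, then store)
 *	      recv_bufs[--recv_index] = rep;
 *
 * rb_send_index == rb_max_requests therefore means the pool is empty.
 */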

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
        struct rpcrdma_buffer *buffers = req->rl_buffer;
        unsigned long flags;

        spin_lock_irqsave(&buffers->rb_lock, flags);
        rpcrdma_buffer_put_sendbuf(req, buffers);
        spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
        struct rpcrdma_buffer *buffers = req->rl_buffer;
        unsigned long flags;

        spin_lock_irqsave(&buffers->rb_lock, flags);
        if (buffers->rb_recv_index < buffers->rb_max_requests) {
                req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
                buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
        }
        spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
        struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
        unsigned long flags;

        spin_lock_irqsave(&buffers->rb_lock, flags);
        buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
        spin_unlock_irqrestore(&buffers->rb_lock, flags);
}

/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

void
rpcrdma_mapping_error(struct rpcrdma_mr_seg *seg)
{
        dprintk("RPC:       map_one: offset %p iova %llx len %zu\n",
                seg->mr_offset,
                (unsigned long long)seg->mr_dma, seg->mr_dmalen);
}

/**
 * rpcrdma_alloc_regbuf - kmalloc and register memory for SEND/RECV buffers
 * @ia: controlling rpcrdma_ia
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns pointer to private header of an area of internally
 * registered memory, or an ERR_PTR. The registered buffer follows
 * the end of the private header.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. regbufs are not
 * used for RDMA READ/WRITE operations, thus are registered only for
 * LOCAL access.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
        struct rpcrdma_regbuf *rb;
        struct ib_sge *iov;

        rb = kmalloc(sizeof(*rb) + size, flags);
        if (rb == NULL)
                goto out;

        iov = &rb->rg_iov;
        iov->addr = ib_dma_map_single(ia->ri_device,
                                      (void *)rb->rg_base, size,
                                      DMA_BIDIRECTIONAL);
        if (ib_dma_mapping_error(ia->ri_device, iov->addr))
                goto out_free;

        iov->length = size;
        iov->lkey = ia->ri_pd->local_dma_lkey;
        rb->rg_size = size;
        rb->rg_owner = NULL;
        return rb;

out_free:
        kfree(rb);
out:
        return ERR_PTR(-ENOMEM);
}
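
/* Layout of the region returned above, for reference; only the
 * payload that follows the private header is DMA-mapped:
 *
 *	+-- struct rpcrdma_regbuf --+------ rg_base ------+
 *	| rg_size, rg_owner, rg_iov |    <size> bytes     |
 *	+---------------------------+---------------------+
 *	                            ^
 *	                            iov->addr (mapped with the PD's
 *	                            local_dma_lkey)
 */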

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
        struct ib_sge *iov;

        if (!rb)
                return;

        iov = &rb->rg_iov;
        ib_dma_unmap_single(ia->ri_device,
                            iov->addr, iov->length, DMA_BIDIRECTIONAL);
        kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
                struct rpcrdma_ep *ep,
                struct rpcrdma_req *req)
{
        struct ib_device *device = ia->ri_device;
        struct ib_send_wr send_wr, *send_wr_fail;
        struct rpcrdma_rep *rep = req->rl_reply;
        struct ib_sge *iov = req->rl_send_iov;
        int i, rc;

        if (rep) {
                rc = rpcrdma_ep_post_recv(ia, ep, rep);
                if (rc)
                        goto out;
                req->rl_reply = NULL;
        }

        send_wr.next = NULL;
        send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
        send_wr.sg_list = iov;
        send_wr.num_sge = req->rl_niovs;
        send_wr.opcode = IB_WR_SEND;

        for (i = 0; i < send_wr.num_sge; i++)
                ib_dma_sync_single_for_device(device, iov[i].addr,
                                              iov[i].length, DMA_TO_DEVICE);
        dprintk("RPC:       %s: posting %d s/g entries\n",
                __func__, send_wr.num_sge);

        if (DECR_CQCOUNT(ep) > 0)
                send_wr.send_flags = 0;
        else { /* Provider must take a send completion every now and then */
                INIT_CQCOUNT(ep);
                send_wr.send_flags = IB_SEND_SIGNALED;
        }

        rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
        if (rc)
                dprintk("RPC:       %s: ib_post_send returned %i\n", __func__,
                        rc);
out:
        return rc;
}
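
/* The signaling cadence used above, sketched: DECR_CQCOUNT() counts
 * down from rep_cqinit (set in rpcrdma_ep_create). Only the send WR
 * that brings the counter to zero carries IB_SEND_SIGNALED, and the
 * counter is then re-armed:
 *
 *	count: N-1 N-2 ... 1 0 | N-1 N-2 ...
 *	flags:  0   0  ... 0 S |  0   0  ...	(S = IB_SEND_SIGNALED)
 *
 * Unsignaled sends complete silently; the occasional signaled one
 * keeps the provider's CQ from wrapping (see rpcrdma_sendcq_upcall).
 */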

/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
                     struct rpcrdma_ep *ep,
                     struct rpcrdma_rep *rep)
{
        struct ib_recv_wr recv_wr, *recv_wr_fail;
        int rc;

        recv_wr.next = NULL;
        recv_wr.wr_id = (u64) (unsigned long) rep;
        recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        recv_wr.num_sge = 1;

        ib_dma_sync_single_for_cpu(ia->ri_device,
                                   rdmab_addr(rep->rr_rdmabuf),
                                   rdmab_length(rep->rr_rdmabuf),
                                   DMA_BIDIRECTIONAL);

        rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);

        if (rc)
                dprintk("RPC:       %s: ib_post_recv returned %i\n", __func__,
                        rc);
        return rc;
}

/* How many chunk list items fit within our inline buffers?
 */
unsigned int
rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        int bytes, segments;

        bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
        bytes -= RPCRDMA_HDRLEN_MIN;
        if (bytes < sizeof(struct rpcrdma_segment) * 2) {
                pr_warn("RPC:       %s: inline threshold too small\n",
                        __func__);
                return 0;
        }

        segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
        dprintk("RPC:       %s: max chunk list size = %d segments\n",
                __func__, segments);
        return segments;
}
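
/* Worked example for the computation above, assuming illustrative
 * inline thresholds of 1024 bytes, an RPCRDMA_HDRLEN_MIN of 28, and
 * a 16-byte struct rpcrdma_segment (see the headers for the real
 * values):
 *
 *	bytes    = 1024 - 28 = 996
 *	996 / 16 = 62 segments fit
 *	fls(62)  = 6, so segments = 1 << 5 = 32
 *
 * The result is thus rounded down to the largest power of two that
 * fits the inline buffer, here 32 segments.
 */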