/* net/sunrpc/xprtrdma/svc_rdma_recvfrom.c */
/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

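/*
 * Receive-path overview: svc_rdma_recvfrom() pulls a completed
 * RDMA_RECV context off the transport's dto queue and builds
 * rqstp->rq_arg from its pages (rdma_build_arg_xdr). If the
 * RPC-over-RDMA header carries a read list, rdma_read_chunks() posts
 * RDMA_READs through the transport's sc_reader method
 * (rdma_read_chunk_lcl or rdma_read_chunk_frmr) and the request is
 * deferred. When the reads complete, the context is queued on
 * sc_read_complete_q and rdma_read_complete() finishes assembling the
 * argument buffer.
 */
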
/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct rpcrdma_msg *rmsgp;
	struct page *page;
	u32 bc;
	int sge_no;

	/* Swap the page in the SGE with the page in argpages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len =
		min_t(size_t, byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;

	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
	if (rmsgp->rm_type == rdma_nomsg)
		rqstp->rq_arg.pages = &rqstp->rq_pages[0];
	else
		rqstp->rq_arg.pages = &rqstp->rq_pages[1];

	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}

/* Issue an RDMA_READ using the local lkey to map the data sink */
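/*
 * Each call maps at most xprt->sc_max_sge_rd pages and posts one
 * RDMA_READ work request. The number of bytes actually scheduled for
 * reading is returned, so the caller can advance *page_no and
 * *page_offset and call again until the whole chunk has been read.
 */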
int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
			struct svc_rqst *rqstp,
			struct svc_rdma_op_ctxt *head,
			int *page_no,
			u32 *page_offset,
			u32 rs_handle,
			u32 rs_length,
			u64 rs_offset,
			bool last)
{
	struct ib_rdma_wr read_wr;
	int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	int ret, read, pno;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->read_hdr = head;
	pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);

	for (pno = 0; pno < pages_needed; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;
		head->arg.len += len;
		if (!pg_off)
			head->count++;
		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;
		ctxt->sge[pno].addr =
			ib_dma_map_page(xprt->sc_cm_id->device,
					head->arg.pages[pg_no], pg_off,
					PAGE_SIZE - pg_off,
					DMA_FROM_DEVICE);
		ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
					   ctxt->sge[pno].addr);
		if (ret)
			goto err;
		atomic_inc(&xprt->sc_dma_used);

		/* The lkey here is either a local dma lkey or a dma_mr lkey */
		ctxt->sge[pno].lkey = xprt->sc_dma_lkey;
		ctxt->sge[pno].length = len;
		ctxt->count++;

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

	memset(&read_wr, 0, sizeof(read_wr));
	read_wr.wr.wr_id = (unsigned long)ctxt;
	read_wr.wr.opcode = IB_WR_RDMA_READ;
	ctxt->wr_op = read_wr.wr.opcode;
	read_wr.wr.send_flags = IB_SEND_SIGNALED;
	read_wr.rkey = rs_handle;
	read_wr.remote_addr = rs_offset;
	read_wr.wr.sg_list = ctxt->sge;
	read_wr.wr.num_sge = pages_needed;

	ret = svc_rdma_send(xprt, &read_wr.wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}

/* Issue an RDMA_READ using an FRMR to map the data sink */
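/*
 * The sink pages are collected into frmr->sg and registered with a
 * fast-registration MR. A REG_MR WR, the RDMA_READ WR, and, when the
 * device lacks read-with-invalidate support, a LOCAL_INV WR are posted
 * to the send queue as a single chain.
 */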
int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *head,
			 int *page_no,
			 u32 *page_offset,
			 u32 rs_handle,
			 u32 rs_length,
			 u64 rs_offset,
			 bool last)
{
	struct ib_rdma_wr read_wr;
	struct ib_send_wr inv_wr;
	struct ib_reg_wr reg_wr;
	u8 key;
	int nents = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
	int ret, read, pno, dma_nents, n;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	if (IS_ERR(frmr)) {
		svc_rdma_put_context(ctxt, 0);
		return -ENOMEM;
	}

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->frmr = frmr;
	nents = min_t(unsigned int, nents, xprt->sc_frmr_pg_list_len);
	read = min_t(int, nents << PAGE_SHIFT, rs_length);

	frmr->direction = DMA_FROM_DEVICE;
	frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
	frmr->sg_nents = nents;

	for (pno = 0; pno < nents; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;
		head->arg.len += len;
		if (!pg_off)
			head->count++;

		sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no],
			    len, pg_off);

		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

	dma_nents = ib_dma_map_sg(xprt->sc_cm_id->device,
				  frmr->sg, frmr->sg_nents,
				  frmr->direction);
	if (!dma_nents) {
		pr_err("svcrdma: failed to dma map sg %p\n",
		       frmr->sg);
		svc_rdma_put_context(ctxt, 0);
		svc_rdma_put_frmr(xprt, frmr);
		return -ENOMEM;
	}
	atomic_inc(&xprt->sc_dma_used);

	n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, PAGE_SIZE);
	if (unlikely(n != frmr->sg_nents)) {
		pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
		       frmr->mr, n, frmr->sg_nents);
		ret = n < 0 ? n : -EINVAL;
		goto err;
	}

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	ctxt->sge[0].addr = frmr->mr->iova;
	ctxt->sge[0].lkey = frmr->mr->lkey;
	ctxt->sge[0].length = frmr->mr->length;
	ctxt->count = 1;
	ctxt->read_hdr = head;

	/* Prepare REG WR */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_id = 0;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->lkey;
	reg_wr.access = frmr->access_flags;
	reg_wr.wr.next = &read_wr.wr;

	/* Prepare RDMA_READ */
	memset(&read_wr, 0, sizeof(read_wr));
	read_wr.wr.send_flags = IB_SEND_SIGNALED;
	read_wr.rkey = rs_handle;
	read_wr.remote_addr = rs_offset;
	read_wr.wr.sg_list = ctxt->sge;
	read_wr.wr.num_sge = 1;
	if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
		read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
		read_wr.wr.wr_id = (unsigned long)ctxt;
		read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
	} else {
		read_wr.wr.opcode = IB_WR_RDMA_READ;
		read_wr.wr.next = &inv_wr;
		/* Prepare invalidate */
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = (unsigned long)ctxt;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
		inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
	}
	ctxt->wr_op = read_wr.wr.opcode;

	/* Post the chain */
	ret = svc_rdma_send(xprt, &reg_wr.wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	ib_dma_unmap_sg(xprt->sc_cm_id->device,
			frmr->sg, frmr->sg_nents, frmr->direction);
	svc_rdma_put_context(ctxt, 0);
	svc_rdma_put_frmr(xprt, frmr);
	return ret;
}

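/* Count the entries in a read chunk list; the list is terminated by an
 * entry whose rc_discrim is xdr_zero.
 */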
static unsigned int
rdma_rcl_chunk_count(struct rpcrdma_read_chunk *ch)
{
	unsigned int count;

	for (count = 0; ch->rc_discrim != xdr_zero; ch++)
		count++;
	return count;
}

/* If there was additional inline content, append it to the end of arg.pages.
 * Tail copy has to be done after the reader function has determined how many
 * pages are needed for RDMA READ.
 */
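/*
 * @position: offset into the received head iovec where the chunk data
 * is logically inserted; the head bytes that follow it are the inline
 * tail that must be copied after the RDMA READ data.
 */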
static int
rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
	       u32 position, u32 byte_count, u32 page_offset, int page_no)
{
	char *srcp, *destp;

	srcp = head->arg.head[0].iov_base + position;
	byte_count = head->arg.head[0].iov_len - position;
	if (byte_count > PAGE_SIZE) {
		dprintk("svcrdma: large tail unsupported\n");
		return 0;
	}

	/* Fit as much of the tail on the current page as possible */
	if (page_offset != PAGE_SIZE) {
		destp = page_address(rqstp->rq_arg.pages[page_no]);
		destp += page_offset;
		while (byte_count--) {
			*destp++ = *srcp++;
			page_offset++;
			if (page_offset == PAGE_SIZE && byte_count)
				goto more;
		}
		goto done;
	}

more:
	/* Fit the rest on the next page */
	page_no++;
	destp = page_address(rqstp->rq_arg.pages[page_no]);
	while (byte_count--)
		*destp++ = *srcp++;

	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

done:
	byte_count = head->arg.head[0].iov_len - position;
	head->arg.page_len += byte_count;
	head->arg.len += byte_count;
	head->arg.buflen += byte_count;
	return 1;
}

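/* Pull the read-chunk data from the client. Returns 0 if there is no
 * read list, a positive value once the RDMA_READs have been posted
 * (the request then completes later, when they finish), or a negative
 * errno on failure.
 */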
static int rdma_read_chunks(struct svcxprt_rdma *xprt,
			    struct rpcrdma_msg *rmsgp,
			    struct svc_rqst *rqstp,
			    struct svc_rdma_op_ctxt *head)
{
	int page_no, ret;
	struct rpcrdma_read_chunk *ch;
	u32 handle, page_offset, byte_count;
	u32 position;
	u64 rs_offset;
	bool last;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	if (rdma_rcl_chunk_count(ch) > RPCSVC_MAXPAGES)
		return -EINVAL;

	/* The request is completed when the RDMA_READs complete. The
	 * head context keeps all the pages that comprise the
	 * request.
	 */
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->hdr_count = head->count;
	head->arg.page_base = 0;
	head->arg.page_len = 0;
	head->arg.len = rqstp->rq_arg.len;
	head->arg.buflen = rqstp->rq_arg.buflen;

	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	position = be32_to_cpu(ch->rc_position);

	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
	if (position == 0) {
		head->arg.pages = &head->pages[0];
		page_offset = head->byte_len;
	} else {
		head->arg.pages = &head->pages[head->count];
		page_offset = 0;
	}

	ret = 0;
	page_no = 0;
	for (; ch->rc_discrim != xdr_zero; ch++) {
		if (be32_to_cpu(ch->rc_position) != position)
			goto err;

		handle = be32_to_cpu(ch->rc_target.rs_handle);
		byte_count = be32_to_cpu(ch->rc_target.rs_length);
		xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
				 &rs_offset);

		while (byte_count > 0) {
			last = (ch + 1)->rc_discrim == xdr_zero;
			ret = xprt->sc_reader(xprt, rqstp, head,
					      &page_no, &page_offset,
					      handle, byte_count,
					      rs_offset, last);
			if (ret < 0)
				goto err;
			byte_count -= ret;
			rs_offset += ret;
			head->arg.buflen += ret;
		}
	}

	/* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
	if (page_offset & 3) {
		u32 pad = 4 - (page_offset & 3);

		head->arg.page_len += pad;
		head->arg.len += pad;
		head->arg.buflen += pad;
		page_offset += pad;
	}

	ret = 1;
	if (position && position < head->arg.head[0].iov_len)
		ret = rdma_copy_tail(rqstp, head, position,
				     byte_count, page_offset, page_no);
	head->arg.head[0].iov_len = position;
	head->position = position;

 err:
	/* Detach arg pages. svc_recv will replenish them */
	for (page_no = 0;
	     &rqstp->rq_pages[page_no] < rqstp->rq_respages; page_no++)
		rqstp->rq_pages[page_no] = NULL;

	return ret;
}

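/* Finish constructing rq_arg for a request whose RDMA_READs have all
 * completed: move the read pages into rqstp->rq_pages, then rebuild
 * the head, pagelist, and tail from the saved head context.
 */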
static int rdma_read_complete(struct svc_rqst *rqstp,
			      struct svc_rdma_op_ctxt *head)
{
	int page_no;
	int ret;

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}

	/* Adjustments made for RDMA_NOMSG type requests */
	if (head->position == 0) {
		if (head->arg.len <= head->sge[0].length) {
			head->arg.head[0].iov_len = head->arg.len -
							head->byte_len;
			head->arg.page_len = 0;
		} else {
			head->arg.head[0].iov_len = head->sge[0].length -
								head->byte_len;
			head->arg.page_len = head->arg.len -
						head->sge[0].length;
		}
	}

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;

	/* Free the context */
	svc_rdma_put_context(head, 0);

	/* XXX: What should this be? */
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);

	return ret;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
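/*
 * Returns the total length of the assembled RPC message, or 0 when no
 * request is ready (svc_recv then returns -EAGAIN), when a read list
 * was posted and completion is deferred, or when the transport is
 * closing.
 */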
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;
	int len;

	dprintk("svcrdma: rqstp=%p\n", rqstp);

	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
		return rdma_read_complete(rqstp, ctxt);
	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	} else {
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will call into
		 * svc_recv again, and we shouldn't remain on the
		 * active transport list.
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto close_out;

		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
	rqstp->rq_xprt_hlen = len;

	/* If the request is invalid, reply with an error */
	if (len < 0) {
		if (len == -ENOSYS)
			svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
		goto close_out;
	}

	/* Read read-list data. */
	ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		goto defer;
	} else if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return ret;

 close_out:
	if (ctxt)
		svc_rdma_put_context(ctxt, 1);
	dprintk("svcrdma: transport %p is closing\n", xprt);
	/*
	 * Set the close bit and enqueue it. svc_recv will see the
	 * close bit and call svc_xprt_delete
	 */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
defer:
	return 0;
}