/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
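
/*
 * Overview of the receive path below: read-chunk data is pulled from
 * the client by one of two reader implementations. rdma_read_chunk_lcl()
 * maps the sink pages with the local DMA lkey; rdma_read_chunk_frmr()
 * fast-registers the sink buffer first. As far as can be seen from this
 * file, the transport is expected to install the appropriate routine in
 * xprt->sc_reader at accept time, based on whether the device supports
 * fast memory registration.
 */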
/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct rpcrdma_msg *rmsgp;
	struct page *page;
	u32 bc;
	int sge_no;

	/* Swap the page in the SGE with the page in argpages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len =
		min_t(size_t, byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;

	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
	if (rmsgp->rm_type == rdma_nomsg)
		rqstp->rq_arg.pages = &rqstp->rq_pages[0];
	else
		rqstp->rq_arg.pages = &rqstp->rq_pages[1];

	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}
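
/*
 * Worked example of the head/pagelist split above, assuming 4KB pages:
 * a 5000-byte receive delivered in two 4096-byte SGEs puts
 * min(5000, 4096) = 4096 bytes in the head iovec, leaving
 * bc = 5000 - 4096 = 904 bytes accounted to rq_arg.page_len and
 * satisfied from the second SGE's page.
 */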
/* Issue an RDMA_READ using the local lkey to map the data sink */
int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
			struct svc_rqst *rqstp,
			struct svc_rdma_op_ctxt *head,
			int *page_no,
			u32 *page_offset,
			u32 rs_handle,
			u32 rs_length,
			u64 rs_offset,
			bool last)
{
	struct ib_send_wr read_wr;
	int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	int ret, read, pno;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->read_hdr = head;
	pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);

	for (pno = 0; pno < pages_needed; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;
		head->arg.len += len;
		if (!pg_off)
			head->count++;
		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;
		ctxt->sge[pno].addr =
			ib_dma_map_page(xprt->sc_cm_id->device,
					head->arg.pages[pg_no], pg_off,
					PAGE_SIZE - pg_off,
					DMA_FROM_DEVICE);
		ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
					   ctxt->sge[pno].addr);
		if (ret)
			goto err;
		atomic_inc(&xprt->sc_dma_used);

		/* The lkey here is either a local dma lkey or a dma_mr lkey */
		ctxt->sge[pno].lkey = xprt->sc_dma_lkey;
		ctxt->sge[pno].length = len;
		ctxt->count++;

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

	memset(&read_wr, 0, sizeof(read_wr));
	read_wr.wr_id = (unsigned long)ctxt;
	read_wr.opcode = IB_WR_RDMA_READ;
	ctxt->wr_op = read_wr.opcode;
	read_wr.send_flags = IB_SEND_SIGNALED;
	read_wr.wr.rdma.rkey = rs_handle;
	read_wr.wr.rdma.remote_addr = rs_offset;
	read_wr.sg_list = ctxt->sge;
	read_wr.num_sge = pages_needed;

	ret = svc_rdma_send(xprt, &read_wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}
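
/*
 * The FRMR variant below chains up to three work requests: a FAST_REG_MR
 * to register the sink buffer, the RDMA_READ itself, and, when the device
 * cannot do READ_WITH_INV, a LOCAL_INV to invalidate the MR afterward.
 * Only the head of the chain is handed to svc_rdma_send().
 */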
/* Issue an RDMA_READ using an FRMR to map the data sink */
int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *head,
			 int *page_no,
			 u32 *page_offset,
			 u32 rs_handle,
			 u32 rs_length,
			 u64 rs_offset,
			 bool last)
{
	struct ib_send_wr read_wr;
	struct ib_send_wr inv_wr;
	struct ib_send_wr fastreg_wr;
	u8 key;
	int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
	struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
	struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
	int ret, read, pno;
	u32 pg_off = *page_offset;
	u32 pg_no = *page_no;

	if (IS_ERR(frmr))
		return -ENOMEM;

	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->frmr = frmr;
	pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);

	frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
	frmr->direction = DMA_FROM_DEVICE;
	frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
	frmr->map_len = pages_needed << PAGE_SHIFT;
	frmr->page_list_len = pages_needed;

	for (pno = 0; pno < pages_needed; pno++) {
		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

		head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
		head->arg.page_len += len;
		head->arg.len += len;
		if (!pg_off)
			head->count++;
		rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
		rqstp->rq_next_page = rqstp->rq_respages + 1;
		frmr->page_list->page_list[pno] =
			ib_dma_map_page(xprt->sc_cm_id->device,
					head->arg.pages[pg_no], 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
					   frmr->page_list->page_list[pno]);
		if (ret)
			goto err;
		atomic_inc(&xprt->sc_dma_used);

		/* adjust offset and wrap to next page if needed */
		pg_off += len;
		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			pg_no++;
		}
		rs_length -= len;
	}

	if (last && rs_length == 0)
		set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	ctxt->sge[0].addr = (unsigned long)frmr->kva + *page_offset;
	ctxt->sge[0].lkey = frmr->mr->lkey;
	ctxt->sge[0].length = read;
	ctxt->count = 1;
	ctxt->read_hdr = head;

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = frmr->map_len;
	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
	fastreg_wr.next = &read_wr;

	/* Prepare RDMA_READ */
	memset(&read_wr, 0, sizeof(read_wr));
	read_wr.send_flags = IB_SEND_SIGNALED;
	read_wr.wr.rdma.rkey = rs_handle;
	read_wr.wr.rdma.remote_addr = rs_offset;
	read_wr.sg_list = ctxt->sge;
	read_wr.num_sge = 1;
	if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
		read_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
		read_wr.wr_id = (unsigned long)ctxt;
		read_wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
	} else {
		read_wr.opcode = IB_WR_RDMA_READ;
		read_wr.next = &inv_wr;
		/* Prepare invalidate */
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = (unsigned long)ctxt;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
		inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
	}
	ctxt->wr_op = read_wr.opcode;

	/* Post the chain */
	ret = svc_rdma_send(xprt, &fastreg_wr);
	if (ret) {
		pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		goto err;
	}

	/* return current location in page array */
	*page_no = pg_no;
	*page_offset = pg_off;
	ret = read;
	atomic_inc(&rdma_stat_read);
	return ret;
 err:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 0);
	svc_rdma_put_frmr(xprt, frmr);
	return ret;
}
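
/*
 * Both readers above share one contract with rdma_read_chunks(): return
 * the number of bytes posted for this RDMA_READ (or a negative errno)
 * and advance *page_no and *page_offset, so the caller can keep calling
 * until the whole chunk has been consumed.
 */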
static unsigned int
rdma_rcl_chunk_count(struct rpcrdma_read_chunk *ch)
{
	unsigned int count;

	for (count = 0; ch->rc_discrim != xdr_zero; ch++)
		count++;
	return count;
}

/* If there was additional inline content, append it to the end of arg.pages.
 * Tail copy has to be done after the reader function has determined how many
 * pages are needed for RDMA READ.
 */
static int
rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
	       u32 position, u32 byte_count, u32 page_offset, int page_no)
{
	char *srcp, *destp;

	srcp = head->arg.head[0].iov_base + position;
	byte_count = head->arg.head[0].iov_len - position;
	if (byte_count > PAGE_SIZE) {
		dprintk("svcrdma: large tail unsupported\n");
		return 0;
	}

	/* Fit as much of the tail on the current page as possible */
	if (page_offset != PAGE_SIZE) {
		destp = page_address(rqstp->rq_arg.pages[page_no]);
		destp += page_offset;
		while (byte_count--) {
			*destp++ = *srcp++;
			page_offset++;
			if (page_offset == PAGE_SIZE && byte_count)
				goto more;
		}
		goto done;
	}

more:
	/* Fit the rest on the next page */
	page_no++;
	destp = page_address(rqstp->rq_arg.pages[page_no]);
	while (byte_count--)
		*destp++ = *srcp++;

	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

done:
	byte_count = head->arg.head[0].iov_len - position;
	head->arg.page_len += byte_count;
	head->arg.len += byte_count;
	head->arg.buflen += byte_count;
	return 1;
}
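
/*
 * rdma_read_chunks() below walks the client's read list and posts one
 * or more RDMA_READs per chunk via xprt->sc_reader. It returns a
 * positive value when reads were posted (the request then completes
 * later through rdma_read_complete()), zero when no read list is
 * present, and a negative errno on failure.
 */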
static int rdma_read_chunks(struct svcxprt_rdma *xprt,
			    struct rpcrdma_msg *rmsgp,
			    struct svc_rqst *rqstp,
			    struct svc_rdma_op_ctxt *head)
{
	int page_no, ret;
	struct rpcrdma_read_chunk *ch;
	u32 handle, page_offset, byte_count;
	u32 position;
	u64 rs_offset;
	bool last;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	if (rdma_rcl_chunk_count(ch) > RPCSVC_MAXPAGES)
		return -EINVAL;

	/* The request is completed when the RDMA_READs complete. The
	 * head context keeps all the pages that comprise the
	 * request.
	 */
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->hdr_count = head->count;
	head->arg.page_base = 0;
	head->arg.page_len = 0;
	head->arg.len = rqstp->rq_arg.len;
	head->arg.buflen = rqstp->rq_arg.buflen;

	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	position = be32_to_cpu(ch->rc_position);

	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
	if (position == 0) {
		head->arg.pages = &head->pages[0];
		page_offset = head->byte_len;
	} else {
		head->arg.pages = &head->pages[head->count];
		page_offset = 0;
	}

	ret = 0;
	page_no = 0;
	for (; ch->rc_discrim != xdr_zero; ch++) {
		if (be32_to_cpu(ch->rc_position) != position)
			goto err;

		handle = be32_to_cpu(ch->rc_target.rs_handle);
		byte_count = be32_to_cpu(ch->rc_target.rs_length);
		xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
				 &rs_offset);

		while (byte_count > 0) {
			last = (ch + 1)->rc_discrim == xdr_zero;
			ret = xprt->sc_reader(xprt, rqstp, head,
					      &page_no, &page_offset,
					      handle, byte_count,
					      rs_offset, last);
			if (ret < 0)
				goto err;
			byte_count -= ret;
			rs_offset += ret;
			head->arg.buflen += ret;
		}
	}

	/* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
	if (page_offset & 3) {
		u32 pad = 4 - (page_offset & 3);

		head->arg.page_len += pad;
		head->arg.len += pad;
		head->arg.buflen += pad;
		page_offset += pad;
	}

	ret = 1;
	if (position && position < head->arg.head[0].iov_len)
		ret = rdma_copy_tail(rqstp, head, position,
				     byte_count, page_offset, page_no);
	head->arg.head[0].iov_len = position;
	head->position = position;

 err:
	/* Detach arg pages. svc_recv will replenish them */
	for (page_no = 0;
	     &rqstp->rq_pages[page_no] < rqstp->rq_respages; page_no++)
		rqstp->rq_pages[page_no] = NULL;

	return ret;
}
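
/*
 * Round-up example for the XDR padding above: if the reads end at
 * page_offset 1001, then pad = 4 - (1001 & 3) = 3 bytes, bringing the
 * offset to 1004 so the decoded stream stays 4-byte aligned.
 */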
static int rdma_read_complete(struct svc_rqst *rqstp,
			      struct svc_rdma_op_ctxt *head)
{
	int page_no;
	int ret;

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}

	/* Adjustments made for RDMA_NOMSG type requests */
	if (head->position == 0) {
		if (head->arg.len <= head->sge[0].length) {
			head->arg.head[0].iov_len = head->arg.len -
							head->byte_len;
			head->arg.page_len = 0;
		} else {
			head->arg.head[0].iov_len = head->sge[0].length -
								head->byte_len;
			head->arg.page_len = head->arg.len -
						head->sge[0].length;
		}
	}

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;

	/* Free the context */
	svc_rdma_put_context(head, 0);

	/* XXX: What should this be? */
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);

	return ret;
}
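
/*
 * svc_rdma_recvfrom() below drains two queues under sc_rq_dto_lock:
 * sc_read_complete_q holds contexts whose RDMA_READs have finished and
 * can be completed immediately, while sc_rq_dto_q holds new RDMA_RECV
 * completions that may still require read-chunk processing.
 */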
/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;
	int len;

	dprintk("svcrdma: rqstp=%p\n", rqstp);

	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
		return rdma_read_complete(rqstp, ctxt);
	} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	} else {
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will go to call into
		 * svc_recv again and we shouldn't be on the active
		 * transport list
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto close_out;

		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
	rqstp->rq_xprt_hlen = len;

	/* If the request is invalid, reply with an error */
	if (len < 0) {
		if (len == -ENOSYS)
			svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
		goto close_out;
	}

	/* Read read-list data. */
	ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		goto defer;
	} else if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return ret;

 close_out:
	if (ctxt)
		svc_rdma_put_context(ctxt, 1);
	dprintk("svcrdma: transport %p is closing\n", xprt);
	/*
	 * Set the close bit and enqueue it. svc_recv will see the
	 * close bit and call svc_xprt_delete
	 */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
 defer:
	return 0;
}