/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring up, but is considered not safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

22 static int
physical_op_open(struct rpcrdma_ia * ia,struct rpcrdma_ep * ep,struct rpcrdma_create_data_internal * cdata)23 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
24 struct rpcrdma_create_data_internal *cdata)
25 {
26 struct ib_mr *mr;
27
28 /* Obtain an rkey to use for RPC data payloads.
29 */
30 mr = ib_get_dma_mr(ia->ri_pd,
31 IB_ACCESS_LOCAL_WRITE |
32 IB_ACCESS_REMOTE_WRITE |
33 IB_ACCESS_REMOTE_READ);
34 if (IS_ERR(mr)) {
35 pr_err("%s: ib_get_dma_mr for failed with %lX\n",
36 __func__, PTR_ERR(mr));
37 return -ENOMEM;
38 }
39
40 ia->ri_dma_mr = mr;
41 return 0;
42 }
44 /* PHYSICAL memory registration conveys one page per chunk segment.
45 */
46 static size_t
physical_op_maxpages(struct rpcrdma_xprt * r_xprt)47 physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
48 {
49 return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
50 rpcrdma_max_segments(r_xprt));
51 }
/* PHYSICAL registration keeps no per-transport state beyond the
 * single DMA MR set up in ->ro_open, so there is nothing to do here.
 * Always succeeds.
 */
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}
59 /* The client's physical memory is already exposed for
60 * remote access via RDMA READ or RDMA WRITE.
61 */
62 static int
physical_op_map(struct rpcrdma_xprt * r_xprt,struct rpcrdma_mr_seg * seg,int nsegs,bool writing)63 physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
64 int nsegs, bool writing)
65 {
66 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
67
68 rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
69 seg->mr_rkey = ia->ri_dma_mr->rkey;
70 seg->mr_base = seg->mr_dma;
71 seg->mr_nsegs = 1;
72 return 1;
73 }
75 /* Unmap a memory region, but leave it registered.
76 */
77 static int
physical_op_unmap(struct rpcrdma_xprt * r_xprt,struct rpcrdma_mr_seg * seg)78 physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
79 {
80 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
81
82 rpcrdma_unmap_one(ia->ri_device, seg);
83 return 1;
84 }
/* Nothing to tear down: this strategy allocates no per-buffer
 * resources, and the single DMA MR is owned by the rpcrdma_ia
 * (set in ->ro_open), not by the buffer pool.
 */
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}
91 const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
92 .ro_map = physical_op_map,
93 .ro_unmap = physical_op_unmap,
94 .ro_open = physical_op_open,
95 .ro_maxpages = physical_op_maxpages,
96 .ro_init = physical_op_init,
97 .ro_destroy = physical_op_destroy,
98 .ro_displayname = "physical",
99 };