/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring-up, but is considered unsafe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

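/* All client memory is pre-registered, so no connection-time
 * set-up is required.
 */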
static int
physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
		 struct rpcrdma_create_data_internal *cdata)
{
	return 0;
}

/* PHYSICAL memory registration conveys one page per chunk segment.
 */
static size_t
physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt));
}

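/* All client memory is exposed through the pre-registered region
 * (ri_bind_mem), so there are no per-transport MRs to allocate.
 */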
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}

/* The client's physical memory is already exposed for
 * remote access via RDMA READ or RDMA WRITE.
 */
static int
physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	rpcrdma_map_one(ia->ri_id->device, seg,
			rpcrdma_data_dir(writing));
	seg->mr_rkey = ia->ri_bind_mem->rkey;
	seg->mr_base = seg->mr_dma;
	seg->mr_nsegs = 1;
	return 1;
}

/* Unmap a memory region, but leave it registered.
 */
static int
physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	read_lock(&ia->ri_qplock);
	rpcrdma_unmap_one(ia->ri_id->device, seg);
	read_unlock(&ia->ri_qplock);

	return 1;
}

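/* No per-request registration state exists, so there is nothing
 * to reset.
 */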
static void
physical_op_reset(struct rpcrdma_xprt *r_xprt)
{
}

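/* No registration resources were allocated, so there is nothing
 * to release when the buffers are destroyed.
 */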
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}

const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
	.ro_map		= physical_op_map,
	.ro_unmap	= physical_op_unmap,
	.ro_open	= physical_op_open,
	.ro_maxpages	= physical_op_maxpages,
	.ro_init	= physical_op_init,
	.ro_reset	= physical_op_reset,
	.ro_destroy	= physical_op_destroy,
	.ro_displayname	= "physical",
};