This source file includes following definitions.
- rdmab_addr
- rdmab_length
- rdmab_lkey
- rdmab_device
- rdmab_data
- rpcr_to_rdmar
- rpcrdma_mr_push
- rpcrdma_mr_pop
- rpcrdma_addrstr
- rpcrdma_portstr
- rpcrdma_mr_recycle
- rpcrdma_regbuf_is_mapped
- rpcrdma_regbuf_dma_map
- rpcrdma_data_dir
- rpcrdma_set_xdrlen
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42 #ifndef _LINUX_SUNRPC_XPRT_RDMA_H
43 #define _LINUX_SUNRPC_XPRT_RDMA_H
44
45 #include <linux/wait.h>
46 #include <linux/spinlock.h>
47 #include <linux/atomic.h>
48 #include <linux/kref.h>
49 #include <linux/workqueue.h>
50 #include <linux/llist.h>
51
52 #include <rdma/rdma_cm.h>
53 #include <rdma/ib_verbs.h>
54
55 #include <linux/sunrpc/clnt.h>
56 #include <linux/sunrpc/rpc_rdma.h>
57 #include <linux/sunrpc/xprtrdma.h>
58
59 #define RDMA_RESOLVE_TIMEOUT (5000)
60 #define RDMA_CONNECT_RETRY_MAX (2)
61
62 #define RPCRDMA_BIND_TO (60U * HZ)
63 #define RPCRDMA_INIT_REEST_TO (5U * HZ)
64 #define RPCRDMA_MAX_REEST_TO (30U * HZ)
65 #define RPCRDMA_IDLE_DISC_TO (5U * 60 * HZ)
66
67
68
69
/*
 * RPC-over-RDMA interface adapter (IA) -- per-transport device state.
 * Holds the RDMA CM identifier and protection domain plus the device
 * capability limits discovered at open time.
 */
struct rpcrdma_ia {
	struct rdma_cm_id	*ri_id;		/* RDMA connection manager ID */
	struct ib_pd		*ri_pd;		/* protection domain */
	int			ri_async_rc;	/* result posted by async CM events */
	unsigned int		ri_max_segs;	/* max data segments per RPC */
	unsigned int		ri_max_frwr_depth; /* max pages per FRWR registration */
	unsigned int		ri_max_send_sges; /* device limit on Send SGEs */
	bool			ri_implicit_roundup;
	enum ib_mr_type		ri_mrtype;	/* MR type used by frwr_init_mr */
	unsigned long		ri_flags;	/* RPCRDMA_IAF_* bit flags */
	struct completion	ri_done;	/* signaled when address resolved */
	struct completion	ri_remove_done;	/* signaled when device removal done */
};
83
/* Bit numbers for ia->ri_flags */
enum {
	RPCRDMA_IAF_REMOVING = 0,	/* underlying device is being removed */
};
87
88
89
90
91
/*
 * RPC-over-RDMA transport endpoint -- connection and queue-pair state
 * for one transport instance.
 */
struct rpcrdma_ep {
	unsigned int		rep_send_count;	/* sends since last signaled WR */
	unsigned int		rep_send_batch;	/* signal completion every N sends */
	unsigned int		rep_max_inline_send; /* negotiated inline send limit */
	unsigned int		rep_max_inline_recv; /* negotiated inline recv limit */
	int			rep_connected;	/* connection state / errno */
	struct ib_qp_init_attr	rep_attr;	/* QP attributes used at create */
	wait_queue_head_t	rep_connect_wait; /* waiters for connect result */
	struct rpcrdma_connect_private	rep_cm_private;	/* CM private data sent to peer */
	struct rdma_conn_param	rep_remote_cma;	/* connection params for rdma_connect */
	unsigned int		rep_max_requests; /* configured credit limit */
	unsigned int		rep_inline_send; /* configured inline send threshold */
	unsigned int		rep_inline_recv; /* configured inline recv threshold */
	int			rep_receive_count; /* posted-but-unconsumed Receives */
};
107
108
109
110
111
112
113
114 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
115 #define RPCRDMA_BACKWARD_WRS (32)
116 #else
117 #define RPCRDMA_BACKWARD_WRS (0)
118 #endif
119
120
121
122
/*
 * A registered buffer: a chunk of DMA-mappable memory described by
 * an ib_sge, used for inline sends/receives and RPC headers. The
 * rg_device is non-NULL only while the buffer is DMA mapped.
 */
struct rpcrdma_regbuf {
	struct ib_sge		rg_iov;		/* addr/length/lkey for the HCA */
	struct ib_device	*rg_device;	/* device the buffer is mapped to */
	enum dma_data_direction	rg_direction;	/* DMA direction used at map time */
	void			*rg_data;	/* kernel virtual address */
};
129
130 static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)
131 {
132 return rb->rg_iov.addr;
133 }
134
135 static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)
136 {
137 return rb->rg_iov.length;
138 }
139
140 static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)
141 {
142 return rb->rg_iov.lkey;
143 }
144
145 static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)
146 {
147 return rb->rg_device;
148 }
149
150 static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
151 {
152 return rb->rg_data;
153 }
154
155 #define RPCRDMA_DEF_GFP (GFP_NOIO | __GFP_NOWARN)
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
enum {
	/* Maximum number of MR segments consumed by transport headers
	 * (assumption based on name -- TODO confirm against marshaling code) */
	RPCRDMA_MAX_HDR_SEGS = 16,
};
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
/*
 * An RPC-over-RDMA reply descriptor: one per posted Receive WR.
 * Carries the receive buffer plus parsed header fields used to
 * match the reply with its rpc_rqst.
 */
struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;		/* completion handler hook */
	__be32			rr_xid;		/* XID copied from the RPC header */
	__be32			rr_vers;	/* RPC-over-RDMA protocol version */
	__be32			rr_proc;	/* transport header procedure */
	int			rr_wc_flags;	/* flags from the work completion */
	u32			rr_inv_rkey;	/* rkey remotely invalidated, if any */
	bool			rr_temp;	/* true for over-allocation reps */
	struct rpcrdma_regbuf	*rr_rdmabuf;	/* the receive buffer itself */
	struct rpcrdma_xprt	*rr_rxprt;	/* owning transport */
	struct rpc_rqst		*rr_rqst;	/* matched request, once found */
	struct xdr_buf		rr_hdrbuf;	/* XDR view of the transport header */
	struct xdr_stream	rr_stream;	/* decoding stream over rr_hdrbuf */
	struct llist_node	rr_node;	/* link in rb_free_reps free list */
	struct ib_recv_wr	rr_recv_wr;	/* Receive work request */
	struct list_head	rr_all;		/* link in rb_all_reps list */
};
208
209
210
211
212
213
214
enum {
	/* Max Receive WRs chained together per ib_post_recv call
	 * (inferred from name -- see rpcrdma_post_recvs) */
	RPCRDMA_MAX_RECV_BATCH = 7,
};
218
219
220
struct rpcrdma_req;
struct rpcrdma_xprt;

/*
 * A send context: one Send WR plus its SGE array and the DMA-unmap
 * bookkeeping needed when the Send completes. Allocated in a ring
 * (see rb_sc_ctxs in struct rpcrdma_buffer).
 */
struct rpcrdma_sendctx {
	struct ib_send_wr	sc_wr;		/* the Send work request */
	struct ib_cqe		sc_cqe;		/* completion handler hook */
	struct ib_device	*sc_device;	/* device SGEs are mapped to */
	struct rpcrdma_xprt	*sc_xprt;	/* owning transport */
	struct rpcrdma_req	*sc_req;	/* request being sent */
	unsigned int		sc_unmap_count;	/* SGEs to DMA unmap on completion */
	struct ib_sge		sc_sges[];	/* flexible array of Send SGEs */
};
232
233
234
235
236
237
238
/*
 * State for one Fast Registration Work Request (FRWR). The union
 * reflects that a registration WR and an invalidation WR are never
 * in flight at the same time for the same MR.
 */
struct rpcrdma_frwr {
	struct ib_mr		*fr_mr;		/* the fast-reg memory region */
	struct ib_cqe		fr_cqe;		/* completion handler hook */
	struct completion	fr_linv_done;	/* signaled when LOCAL_INV completes */
	union {
		struct ib_reg_wr	fr_regwr;	/* FastReg WR */
		struct ib_send_wr	fr_invwr;	/* LocalInv WR */
	};
};
248
struct rpcrdma_req;

/*
 * One externally registered memory region, advertised to the server
 * as a chunk (handle/length/offset triple).
 */
struct rpcrdma_mr {
	struct list_head	mr_list;	/* link in free or registered list */
	struct rpcrdma_req	*mr_req;	/* request currently using this MR */
	struct scatterlist	*mr_sg;		/* scatterlist being registered */
	int			mr_nents;	/* number of mapped sg entries */
	enum dma_data_direction	mr_dir;		/* DMA direction of the mapping */
	struct rpcrdma_frwr	frwr;		/* FRWR registration state */
	struct rpcrdma_xprt	*mr_xprt;	/* owning transport */
	u32			mr_handle;	/* rkey advertised to the server */
	u32			mr_length;	/* length of the registered range */
	u64			mr_offset;	/* offset of the registered range */
	struct work_struct	mr_recycle;	/* deferred teardown/replacement */
	struct list_head	mr_all;		/* link in rb_all_mrs list */
};
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
enum {
	RPCRDMA_MAX_IOV_SEGS	= 3,		/* head/pages/tail of an xdr_buf */
	/* enough segments for a 1MB payload at the current page size */
	RPCRDMA_MAX_DATA_SEGS	= ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS	= RPCRDMA_MAX_DATA_SEGS +
				  RPCRDMA_MAX_IOV_SEGS,
};
292
/* An element of rl_segments: one contiguous piece of an RPC payload
 * to be registered for RDMA.
 */
struct rpcrdma_mr_seg {
	u32		mr_len;		/* length of this segment */
	struct page	*mr_page;	/* backing page, or NULL for kva memory */
	char		*mr_offset;	/* page offset, or kernel virtual address */
};
298
299
300
301
302
303
304
305
306
307
308
enum {
	RPCRDMA_MIN_SEND_SGES = 3,	/* minimum SGEs the device must support */
	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
	/* transport header + head iovec + inline pages + tail iovec */
	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
};
314
struct rpcrdma_buffer;

/*
 * One RPC request in flight on an RDMA transport. Embeds the generic
 * rpc_rqst slot so rpcr_to_rdmar() can recover the container.
 */
struct rpcrdma_req {
	struct list_head	rl_list;	/* link in rb_send_bufs */
	struct rpc_rqst		rl_slot;	/* embedded generic RPC request */
	struct rpcrdma_rep	*rl_reply;	/* matched reply, once received */
	struct xdr_stream	rl_stream;	/* encoding stream over rl_hdrbuf */
	struct xdr_buf		rl_hdrbuf;	/* XDR view of the transport header */
	struct rpcrdma_sendctx	*rl_sendctx;	/* send context for this request */
	struct rpcrdma_regbuf	*rl_rdmabuf;	/* transport header buffer */
	struct rpcrdma_regbuf	*rl_sendbuf;	/* RPC call buffer */
	struct rpcrdma_regbuf	*rl_recvbuf;	/* RPC reply buffer */

	struct list_head	rl_all;		/* link in rb_allreqs */
	struct kref		rl_kref;	/* pins req until Send completes */

	struct list_head	rl_free_mrs;	/* MRs cached for this request */
	struct list_head	rl_registered;	/* MRs registered for this RPC */
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};
334
/* Map a generic rpc_rqst back to its containing rpcrdma_req
 * (rl_slot is embedded, so this is a constant-offset container_of).
 */
static inline struct rpcrdma_req *
rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
	return container_of(rqst, struct rpcrdma_req, rl_slot);
}
340
/* Add an MR to the head of an MR list. Caller provides locking
 * appropriate for the list being used.
 */
static inline void
rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
	list_add(&mr->mr_list, list);
}
346
/* Remove and return the first MR on a list, or NULL if the list is
 * empty. The entry is re-initialized so a later list_del is safe.
 */
static inline struct rpcrdma_mr *
rpcrdma_mr_pop(struct list_head *list)
{
	struct rpcrdma_mr *mr;

	mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list);
	if (mr)
		list_del_init(&mr->mr_list);
	return mr;
}
357
358
359
360
361
362
363
/*
 * Per-transport pool of requests, replies, MRs, and send contexts.
 * rb_lock protects the lists; reply buffers are handed out from a
 * lock-free llist.
 */
struct rpcrdma_buffer {
	spinlock_t		rb_lock;	/* protects the lists below */
	struct list_head	rb_send_bufs;	/* free rpcrdma_reqs */
	struct list_head	rb_mrs;		/* free MRs */

	unsigned long		rb_sc_head;	/* send context ring: producer */
	unsigned long		rb_sc_tail;	/* send context ring: consumer */
	unsigned long		rb_sc_last;	/* last valid ring index */
	struct rpcrdma_sendctx	**rb_sc_ctxs;	/* the send context ring itself */

	struct list_head	rb_allreqs;	/* every allocated req */
	struct list_head	rb_all_mrs;	/* every allocated MR */
	struct list_head	rb_all_reps;	/* every allocated rep */

	struct llist_head	rb_free_reps;	/* lock-free list of free reps */

	u32			rb_max_requests; /* credit limit */
	u32			rb_credits;	/* most recent server credit grant */

	u32			rb_bc_srv_max_requests;	/* backchannel slots */
	u32			rb_bc_max_requests;

	struct work_struct	rb_refresh_worker; /* replenishes the MR pool */
};
388
389
390
391
/*
 * Transport statistics, reported via xprt_rdma_print_stats.
 */
struct rpcrdma_stats {
	/* accessed when sending a call */
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;
	unsigned long long	total_rdma_request;

	/* rarely accessed error counters */
	unsigned long long	pullup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		mrs_recycled;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
	unsigned long		empty_sendctx_q;

	/* accessed when receiving a reply */
	unsigned long long	total_rdma_reply;
	unsigned long long	fixup_copy_count;
	unsigned long		reply_waits_for_send;
	unsigned long		local_inv_needed;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
};
417
418
419
420
421
422
423
424
425
426
427
/*
 * The RPC-over-RDMA transport instance: the generic rpc_xprt plus
 * the RDMA-specific interface adapter, endpoint, and buffer pool.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;	/* embedded generic transport */
	struct rpcrdma_ia	rx_ia;		/* interface adapter */
	struct rpcrdma_ep	rx_ep;		/* connection endpoint */
	struct rpcrdma_buffer	rx_buf;		/* req/rep/MR pools */
	struct delayed_work	rx_connect_worker; /* deferred (re)connect */
	struct rpc_timeout	rx_timeout;	/* per-transport RPC timeouts */
	struct rpcrdma_stats	rx_stats;	/* transport statistics */
};
437
438 #define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
439
440 static inline const char *
441 rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
442 {
443 return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR];
444 }
445
446 static inline const char *
447 rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt)
448 {
449 return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT];
450 }
451
452
453
454
455 extern int xprt_rdma_pad_optimize;
456
457
458
459
460 extern unsigned int xprt_rdma_memreg_strategy;
461
462
463
464
465 int rpcrdma_ia_open(struct rpcrdma_xprt *xprt);
466 void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
467 void rpcrdma_ia_close(struct rpcrdma_ia *);
468
469
470
471
472 int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt);
473 void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt);
474 int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
475 void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
476
477 int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
478 struct rpcrdma_req *);
479 void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
480
481
482
483
484 struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
485 gfp_t flags);
486 void rpcrdma_req_destroy(struct rpcrdma_req *req);
487 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
488 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
489 struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);
490
491 struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
492 void rpcrdma_mr_put(struct rpcrdma_mr *mr);
493
/* Hand a defective MR to a workqueue for deferred teardown and
 * replacement (cannot be done in interrupt/completion context).
 */
static inline void
rpcrdma_mr_recycle(struct rpcrdma_mr *mr)
{
	schedule_work(&mr->mr_recycle);
}
499
500 struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
501 void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
502 struct rpcrdma_req *req);
503 void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
504
505 bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
506 gfp_t flags);
507 bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
508 struct rpcrdma_regbuf *rb);
509
510
511
512
513
514
515 static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
516 {
517 return rb->rg_device != NULL;
518 }
519
520
521
522
523
524
525
526
527 static inline bool rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
528 struct rpcrdma_regbuf *rb)
529 {
530 if (likely(rpcrdma_regbuf_is_mapped(rb)))
531 return true;
532 return __rpcrdma_regbuf_dma_map(r_xprt, rb);
533 }
534
535
536
537
538
539 static inline enum dma_data_direction
540 rpcrdma_data_dir(bool writing)
541 {
542 return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
543 }
544
545
546
547 bool frwr_is_supported(struct ib_device *device);
548 void frwr_recycle(struct rpcrdma_req *req);
549 void frwr_reset(struct rpcrdma_req *req);
550 int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
551 int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
552 void frwr_release_mr(struct rpcrdma_mr *mr);
553 size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt);
554 struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
555 struct rpcrdma_mr_seg *seg,
556 int nsegs, bool writing, __be32 xid,
557 struct rpcrdma_mr *mr);
558 int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req);
559 void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
560 void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
561 void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
562
563
564
565
566
/* How an RPC's payload is conveyed: inline, or via Read/Write/Reply
 * chunk lists in the transport header.
 */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* no chunks: entirely inline */
	rpcrdma_readch,		/* payload in a Read chunk */
	rpcrdma_areadch,	/* whole call in a Read chunk */
	rpcrdma_writech,	/* reply payload in a Write chunk */
	rpcrdma_replych		/* whole reply in a Reply chunk */
};
574
575 int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
576 struct rpcrdma_req *req, u32 hdrlen,
577 struct xdr_buf *xdr,
578 enum rpcrdma_chunktype rtype);
579 void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc);
580 int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
581 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
582 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
583 void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
584
585 static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
586 {
587 xdr->head[0].iov_len = len;
588 xdr->len = len;
589 }
590
591
592
593 extern unsigned int xprt_rdma_slot_table_entries;
594 extern unsigned int xprt_rdma_max_inline_read;
595 extern unsigned int xprt_rdma_max_inline_write;
596 void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
597 void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
598 void xprt_rdma_close(struct rpc_xprt *xprt);
599 void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
600 int xprt_rdma_init(void);
601 void xprt_rdma_cleanup(void);
602
603
604
605 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
606 int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
607 size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
608 unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *);
609 int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
610 void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
611 int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
612 void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
613 void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
614 #endif
615
616 extern struct xprt_class xprt_rdma_bc;
617
618 #endif