This source file includes the following definitions:
- frwr_is_supported
- frwr_release_mr
- frwr_mr_recycle
- frwr_mr_recycle_worker
- frwr_recycle
- frwr_reset
- frwr_init_mr
- frwr_open
- frwr_maxpages
- frwr_map
- frwr_wc_fastreg
- frwr_send
- frwr_reminv
- __frwr_release_mr
- frwr_wc_localinv
- frwr_wc_localinv_wake
- frwr_unmap_sync
- frwr_wc_localinv_done
- frwr_unmap_async
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/**
 * frwr_is_supported - Check whether a device supports FRWR
 * @device: interface adapter to check
 *
 * Returns true if the device supports FRWR, otherwise false.
 */
bool frwr_is_supported(struct ib_device *device)
{
	struct ib_device_attr *attrs = &device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		device->name);
	return false;
}

/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_init_mr
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_recycle(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	trace_xprtrdma_mr_recycle(mr);

	if (mr->mr_dir != DMA_NONE) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_id->device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
		mr->mr_dir = DMA_NONE;
	}

	spin_lock(&r_xprt->rx_buf.rb_lock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_lock);

	frwr_release_mr(mr);
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
frwr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr,
					     mr_recycle);

	frwr_mr_recycle(mr->mr_xprt, mr);
}

/* frwr_recycle - Discard MRs
 * @req: request to reset
 *
 * Used after a reconnect. These MRs could be in flight, so we
 * cannot destroy them here. Recycle them instead.
 */
void frwr_recycle(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		frwr_mr_recycle(mr->mr_xprt, mr);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FastReg or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		rpcrdma_mr_put(mr);
}

/**
 * frwr_init_mr - Initialize one MR
 * @ia: interface adapter
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;
	int rc;

	/* NB: ib_alloc_mr and device drivers typically allocate
	 *     memory with GFP_KERNEL.
	 */
	frmr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	sg = kcalloc(depth, sizeof(*sg), GFP_NOFS);
	if (!sg)
		goto out_list_err;

	mr->frwr.fr_mr = frmr;
	mr->mr_dir = DMA_NONE;
	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
	init_completion(&mr->frwr.fr_linv_done);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	rc = PTR_ERR(frmr);
	trace_xprtrdma_frwr_alloc(mr, rc);
	return rc;

out_list_err:
	ib_dereg_mr(frmr);
	return -ENOMEM;
}

/**
 * frwr_open - Prepare an endpoint for use with FRWR
 * @ia: interface adapter this endpoint will use
 * @ep: endpoint to prepare
 *
 * On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	ep->rep_max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 *
 * On failure, a negative errno is returned.
 */
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
{
	struct ib_device_attr *attrs = &ia->ri_id->device->attrs;
	int max_qp_wr, depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > 1)
		ia->ri_max_frwr_depth = attrs->max_sge_rd;
	else
		ia->ri_max_frwr_depth = attrs->max_fast_reg_page_list_len;
	if (ia->ri_max_frwr_depth > RPCRDMA_MAX_DATA_SEGS)
		ia->ri_max_frwr_depth = RPCRDMA_MAX_DATA_SEGS;
	dprintk("RPC: %s: max FR page list depth = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2;	/* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}

	max_qp_wr = ia->ri_id->device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->rep_max_requests > max_qp_wr)
		ep->rep_max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
		ep->rep_max_requests = max_qp_wr / depth;
		if (!ep->rep_max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
	}
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;	/* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = ep->rep_max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;	/* for ib_drain_rq */

	ia->ri_max_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ia->ri_max_frwr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ia->ri_max_segs += 2;
	if (ia->ri_max_segs > RPCRDMA_MAX_HDR_SEGS)
		ia->ri_max_segs = RPCRDMA_MAX_HDR_SEGS;
	return 0;
}
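
To make the send-queue sizing above concrete, here is a minimal standalone userspace sketch of the depth calculation. The value 64 for RPCRDMA_MAX_DATA_SEGS is an assumption for illustration; the kernel constant may differ by release.

	#include <stdio.h>

	#define RPCRDMA_MAX_DATA_SEGS 64	/* assumed value, for illustration */

	/* Mirror frwr_open(): 7 WRs cover reg + invalidate for the head,
	 * pagelist, and tail chunks plus the Send itself; every extra MR
	 * needed to cover RPCRDMA_MAX_DATA_SEGS adds one reg WR and one
	 * invalidate WR.
	 */
	static int send_wrs_per_req(unsigned int max_frwr_depth)
	{
		int depth = 7;
		int delta;

		if (max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
			delta = RPCRDMA_MAX_DATA_SEGS - max_frwr_depth;
			do {
				depth += 2;	/* FRWR reg + invalidate */
				delta -= max_frwr_depth;
			} while (delta > 0);
		}
		return depth;
	}

	int main(void)
	{
		/* A device limited to 16 pages per MR: 7 + 2 * 3 = 13 WRs. */
		printf("depth(16) = %d\n", send_wrs_per_req(16));
		/* A device covering all 64 segments in one MR: 7 WRs. */
		printf("depth(64) = %d\n", send_wrs_per_req(64));
		return 0;
	}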

/**
 * frwr_maxpages - Compute size of largest payload
 * @r_xprt: transport
 *
 * Returns maximum size of an RPC message, in pages.
 *
 * FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     (ia->ri_max_segs - 2) * ia->ri_max_frwr_depth);
}
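
A quick arithmetic check of frwr_maxpages(), again assuming RPCRDMA_MAX_DATA_SEGS is 64: two of the ri_max_segs segments are reserved for the head and tail buffers, and the product is clamped to the maximum data segment count.

	#include <stdio.h>

	#define RPCRDMA_MAX_DATA_SEGS 64	/* assumed value, for illustration */

	static unsigned int maxpages(unsigned int max_segs,
				     unsigned int max_frwr_depth)
	{
		unsigned int pages = (max_segs - 2) * max_frwr_depth;

		return pages < RPCRDMA_MAX_DATA_SEGS ?
		       pages : RPCRDMA_MAX_DATA_SEGS;
	}

	int main(void)
	{
		/* With depth 16, frwr_open() computes DIV_ROUND_UP(64, 16)
		 * + 2 = 6 segments, so (6 - 2) * 16 = 64 pages.
		 */
		printf("maxpages = %u\n", maxpages(6, 16));
		return 0;
	}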

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (ia->ri_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	ibmr = mr->frwr.fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->frwr.fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	mr->mr_dir = DMA_NONE;
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}
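
Two details of frwr_map() are worth isolating: the RPC's XID is stamped into the upper 32 bits of the MR's iova (which helps match MRs to RPCs when debugging), and the low-order byte of the rkey is bumped before each re-registration so a stale handle cannot accidentally match. Below is a standalone sketch of that arithmetic; bump_fast_reg_key() is a hypothetical stand-in for the kernel's ib_update_fast_reg_key().

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for ib_update_fast_reg_key(): the low-order byte of
	 * an rkey is software-owned and incremented on every
	 * registration.
	 */
	static uint32_t bump_fast_reg_key(uint32_t rkey)
	{
		uint8_t key = (uint8_t)(rkey & 0x000000FF);

		return (rkey & 0xFFFFFF00) | (uint8_t)(key + 1);
	}

	int main(void)
	{
		uint64_t iova = 0x00007f32deadbeefULL; /* example address */
		uint32_t xid = 0x01020304;	/* example XID, host-endian */
		uint32_t rkey = 0x1a2b3cffU;	/* example rkey */

		/* Keep the low 32 bits of the iova; put the XID on top. */
		iova &= 0x00000000ffffffffULL;
		iova |= (uint64_t)xid << 32;
		printf("iova = 0x%016llx\n", (unsigned long long)iova);

		/* The 0xff key byte wraps around to 0x00. */
		printf("rkey 0x%08x -> 0x%08x\n", rkey, bump_fast_reg_key(rkey));
		return 0;
	}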

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, frwr);
	/* The MR will get recycled when the associated req is retransmitted */
}

/**
 * frwr_send - post Send WR containing the RPC Call message
 * @ia: interface adapter
 * @req: Prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the result of ib_post_send.
 */
int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}
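
The chaining loop above links each REG_MR WR in front of whatever has been queued so far, so a single ib_post_send() executes every registration and then the Send. A minimal userspace sketch with stand-in types (the struct and field names below are illustrative, not the kernel's):

	#include <stdio.h>

	struct send_wr {
		const char *label;
		struct send_wr *next;
	};

	int main(void)
	{
		struct send_wr send = { "SEND", NULL };
		struct send_wr regs[2] = { { "REG_MR(mr1)", NULL },
					   { "REG_MR(mr2)", NULL } };
		struct send_wr *post_wr = &send;
		int i;

		/* Mirror the list_for_each_entry() loop in frwr_send(). */
		for (i = 0; i < 2; i++) {
			regs[i].next = post_wr;
			post_wr = &regs[i];
		}

		/* The provider executes the chain head-to-tail:
		 * REG_MR(mr2) -> REG_MR(mr1) -> SEND
		 */
		for (; post_wr; post_wr = post_wr->next)
			printf("%s\n", post_wr->label);
		return 0;
	}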

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_remoteinv(mr);
			rpcrdma_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (wc->status != IB_WC_SUCCESS)
		rpcrdma_mr_recycle(mr);
	else
		rpcrdma_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, frwr);
	__frwr_release_mr(wc, mr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, frwr);
	__frwr_release_mr(wc, mr);
	complete(&frwr->fr_linv_done);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously
 * mapped memory regions. This guarantees that registered MRs are
 * properly fenced from the server before the RPC consumer accesses
 * the data in them. It also ensures proper Send flow control: waking
 * the next RPC waits until this RPC has relinquished all its Send
 * Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		list_del_init(&mr->mr_list);
		rpcrdma_mr_recycle(mr);
	}
}
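
When ib_post_send() fails partway, bad_wr points into the middle of the WR chain, and the error path above recovers each containing MR with two container_of() steps. A self-contained sketch of that unwinding, using minimal stand-in types (illustrative only, not the kernel definitions):

	#include <stddef.h>
	#include <stdio.h>

	struct ib_send_wr { struct ib_send_wr *next; };
	struct rpcrdma_frwr { struct ib_send_wr fr_invwr; };
	struct rpcrdma_mr { int id; struct rpcrdma_frwr frwr; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct rpcrdma_mr mrs[3] = { { .id = 1 }, { .id = 2 },
					     { .id = 3 } };
		struct ib_send_wr *bad_wr;
		int i;

		/* Chain the embedded invalidate WRs, as frwr_unmap_sync
		 * does with *prev/last.
		 */
		for (i = 0; i < 2; i++)
			mrs[i].frwr.fr_invwr.next = &mrs[i + 1].frwr.fr_invwr;
		mrs[2].frwr.fr_invwr.next = NULL;

		/* Walk the chain and recover each containing MR. The
		 * kernel advances bad_wr before recycling, because
		 * recycling frees the MR that embeds the WR.
		 */
		for (bad_wr = &mrs[0].frwr.fr_invwr; bad_wr;
		     bad_wr = bad_wr->next) {
			struct rpcrdma_frwr *frwr =
				container_of(bad_wr, struct rpcrdma_frwr,
					     fr_invwr);
			struct rpcrdma_mr *mr =
				container_of(frwr, struct rpcrdma_mr, frwr);

			printf("recovered mr %d\n", mr->id);
		}
		return 0;
	}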

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, frwr);
	__frwr_release_mr(wc, mr);

	/* Ensure @rep is generated before __frwr_release_mr */
	smp_rmb();
	rpcrdma_complete_rqst(rep);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		frwr = &mr->frwr;
		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		last->next = NULL;
		last->wr_cqe = &frwr->fr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	frwr->fr_cqe.done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
	if (!rc)
		return;

	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
	 */
	trace_xprtrdma_post_linv(req, rc);
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);
		bad_wr = bad_wr->next;

		rpcrdma_mr_recycle(mr);
	}

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake does
	 * not happen. Unblock the RPC in that case.
	 */
	rpcrdma_complete_rqst(req->rl_reply);
}