This source file includes the following definitions:
- svc_rdma_next_ctxt
- svc_rdma_get_rw_ctxt
- svc_rdma_put_rw_ctxt
- svc_rdma_destroy_rw_ctxts
- svc_rdma_cc_init
- svc_rdma_cc_release
- svc_rdma_write_info_alloc
- svc_rdma_write_info_free
- svc_rdma_write_done
- svc_rdma_read_info_alloc
- svc_rdma_read_info_free
- svc_rdma_wc_read_done
- svc_rdma_post_chunk_ctxt
- svc_rdma_vec_to_sg
- svc_rdma_pagelist_to_sg
- svc_rdma_build_writes
- svc_rdma_send_xdr_kvec
- svc_rdma_send_xdr_pagelist
- svc_rdma_send_write_chunk
- svc_rdma_send_reply_chunk
- svc_rdma_build_read_segment
- svc_rdma_build_read_chunk
- svc_rdma_build_normal_read_chunk
- svc_rdma_build_pz_read_chunk
- svc_rdma_recv_read_chunk
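/* Use the kernel's generic RDMA R/W API (rdma/rw.h) to move
 * RPC-over-RDMA Read and Write chunks for the server-side transport.
 */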
#include <rdma/rw.h>

#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/sunrpc/debug.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT

static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
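/* Each R/W context contains state for one chain of RDMA Read or
 * Write Work Requests.
 *
 * Each WR chain moves a single contiguous server-side buffer,
 * because scatterlist entries after the first must start on page
 * alignment, which xdr_buf iovecs cannot guarantee.
 *
 * Each WR chain carries one R_key. Each RPC-over-RDMA segment from
 * a client may bear a unique R_key, so a chain moves at most one
 * segment at a time.
 *
 * The embedded scatterlist lets the kernel's generic RDMA R/W API
 * handle DMA mapping and memory registration for each Work Request.
 */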
struct svc_rdma_rw_ctxt {
	struct list_head	rw_list;
	struct rdma_rw_ctx	rw_ctx;
	int			rw_nents;
	struct sg_table		rw_sg_table;
	struct scatterlist	rw_first_sgl[0];
};

static inline struct svc_rdma_rw_ctxt *
svc_rdma_next_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
					rw_list);
}

static struct svc_rdma_rw_ctxt *
svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
{
	struct svc_rdma_rw_ctxt *ctxt;

	spin_lock(&rdma->sc_rw_ctxt_lock);

	ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
	if (ctxt) {
		list_del(&ctxt->rw_list);
		spin_unlock(&rdma->sc_rw_ctxt_lock);
	} else {
		spin_unlock(&rdma->sc_rw_ctxt_lock);
		ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
			       GFP_KERNEL);
		if (!ctxt)
			goto out;
		INIT_LIST_HEAD(&ctxt->rw_list);
	}

	ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
	if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
				   ctxt->rw_sg_table.sgl,
				   SG_CHUNK_SIZE)) {
		kfree(ctxt);
		ctxt = NULL;
	}
out:
	return ctxt;
}

static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
				 struct svc_rdma_rw_ctxt *ctxt)
{
	sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);

	spin_lock(&rdma->sc_rw_ctxt_lock);
	list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
	spin_unlock(&rdma->sc_rw_ctxt_lock);
}
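/**
 * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
 * @rdma: transport about to be destroyed
 */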
void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
		list_del(&ctxt->rw_list);
		kfree(ctxt);
	}
}
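/* A chunk context tracks all I/O for moving one Read or Write
 * chunk. This is a set of rdma_rw's that handle data movement
 * for all segments of one chunk.
 */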
struct svc_rdma_chunk_ctxt {
	struct ib_cqe		cc_cqe;
	struct svcxprt_rdma	*cc_rdma;
	struct list_head	cc_rwctxts;
	int			cc_sqecount;
};

static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc)
{
	cc->cc_rdma = rdma;
	svc_xprt_get(&rdma->sc_xprt);

	INIT_LIST_HEAD(&cc->cc_rwctxts);
	cc->cc_sqecount = 0;
}

static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
				enum dma_data_direction dir)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
		list_del(&ctxt->rw_list);

		rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
				    rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				    ctxt->rw_nents, dir);
		svc_rdma_put_rw_ctxt(rdma, ctxt);
	}
	svc_xprt_put(&rdma->sc_xprt);
}
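/* State for sending a Write or Reply chunk.
 *  - Tracks progress of writing one chunk over all its segments
 *  - Stores arguments for the SGL constructor functions
 */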
struct svc_rdma_write_info {
	/* write state of this chunk */
	unsigned int		wi_seg_off;
	unsigned int		wi_seg_no;
	unsigned int		wi_nsegs;
	__be32			*wi_segs;

	/* SGL constructor arguments */
	struct xdr_buf		*wi_xdr;
	unsigned char		*wi_base;
	unsigned int		wi_next_off;

	struct svc_rdma_chunk_ctxt	wi_cc;
};
static struct svc_rdma_write_info *
svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
{
	struct svc_rdma_write_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	info->wi_seg_off = 0;
	info->wi_seg_no = 0;
	info->wi_nsegs = be32_to_cpup(++chunk);
	info->wi_segs = ++chunk;
	svc_rdma_cc_init(rdma, &info->wi_cc);
	info->wi_cc.cc_cqe.done = svc_rdma_write_done;
	return info;
}

static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
{
	svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
	kfree(info);
}
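/**
 * svc_rdma_write_done - Write chunk completion
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 *
 * Pages under I/O are freed by a subsequent Send completion.
 */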
static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_write_info *info =
			container_of(cc, struct svc_rdma_write_info, wi_cc);

	trace_svcrdma_wc_write(wc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);

	svc_rdma_write_info_free(info);
}
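/* State for pulling a Read chunk.
 */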
struct svc_rdma_read_info {
	struct svc_rdma_recv_ctxt	*ri_readctxt;
	unsigned int			ri_position;
	unsigned int			ri_pageno;
	unsigned int			ri_pageoff;
	unsigned int			ri_chunklen;

	struct svc_rdma_chunk_ctxt	ri_cc;
};

static struct svc_rdma_read_info *
svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_read_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return info;

	svc_rdma_cc_init(rdma, &info->ri_cc);
	info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
	return info;
}

static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
{
	svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
	kfree(info);
}
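/**
 * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 * @cq: controlling Completion Queue
 * @wc: Work Completion
 */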
static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_chunk_ctxt *cc =
			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_read_info *info =
			container_of(cc, struct svc_rdma_read_info, ri_cc);

	trace_svcrdma_wc_read(wc);

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
	} else {
		spin_lock(&rdma->sc_rq_dto_lock);
		list_add_tail(&info->ri_readctxt->rc_list,
			      &rdma->sc_read_complete_q);

		set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
		spin_unlock(&rdma->sc_rq_dto_lock);

		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_rdma_read_info_free(info);
}
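/* This function sleeps when the transport's Send Queue is congested.
 *
 * Assumptions:
 * - If ib_post_send() succeeds, only one completion is expected,
 *   even if one or more WRs are flushed. This is true when posting
 *   an rdma_rw_ctx or when posting a single signaled WR.
 */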
static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
{
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_xprt *xprt = &rdma->sc_xprt;
	struct ib_send_wr *first_wr;
	const struct ib_send_wr *bad_wr;
	struct list_head *tmp;
	struct ib_cqe *cqe;
	int ret;

	if (cc->cc_sqecount > rdma->sc_sq_depth)
		return -EINVAL;

	first_wr = NULL;
	cqe = &cc->cc_cqe;
	list_for_each(tmp, &cc->cc_rwctxts) {
		struct svc_rdma_rw_ctxt *ctxt;

		ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
		first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
					   rdma->sc_port_num, cqe, first_wr);
		cqe = NULL;
	}

	do {
		if (atomic_sub_return(cc->cc_sqecount,
				      &rdma->sc_sq_avail) > 0) {
			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
			if (ret)
				break;
			return 0;
		}

		trace_svcrdma_sq_full(rdma);
		atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
		wait_event(rdma->sc_send_wait,
			   atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
		trace_svcrdma_sq_retry(rdma);
	} while (1);

	trace_svcrdma_sq_post_err(rdma, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);

	/* If even one WR was posted, there will be a completion. */
	if (bad_wr != first_wr)
		return 0;

	atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);
	return -ENOTCONN;
}
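/* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 */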
static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
			       unsigned int len,
			       struct svc_rdma_rw_ctxt *ctxt)
{
	struct scatterlist *sg = ctxt->rw_sg_table.sgl;

	sg_set_buf(&sg[0], info->wi_base, len);
	info->wi_base += len;

	ctxt->rw_nents = 1;
}
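/* Build and DMA-map an SGL that covers part of an xdr_buf's page list.
 */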
static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
				    unsigned int remaining,
				    struct svc_rdma_rw_ctxt *ctxt)
{
	unsigned int sge_no, sge_bytes, page_off, page_no;
	struct xdr_buf *xdr = info->wi_xdr;
	struct scatterlist *sg;
	struct page **page;

	page_off = info->wi_next_off + xdr->page_base;
	page_no = page_off >> PAGE_SHIFT;
	page_off = offset_in_page(page_off);
	page = xdr->pages + page_no;
	info->wi_next_off += remaining;
	sg = ctxt->rw_sg_table.sgl;
	sge_no = 0;
	do {
		sge_bytes = min_t(unsigned int, remaining,
				  PAGE_SIZE - page_off);
		sg_set_page(sg, *page, sge_bytes, page_off);

		remaining -= sge_bytes;
		sg = sg_next(sg);
		page_off = 0;
		sge_no++;
		page++;
	} while (remaining);

	ctxt->rw_nents = sge_no;
}
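/* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 * an RPC Reply.
 */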
static int
svc_rdma_build_writes(struct svc_rdma_write_info *info,
		      void (*constructor)(struct svc_rdma_write_info *info,
					  unsigned int len,
					  struct svc_rdma_rw_ctxt *ctxt),
		      unsigned int remaining)
{
	struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
	struct svcxprt_rdma *rdma = cc->cc_rdma;
	struct svc_rdma_rw_ctxt *ctxt;
	__be32 *seg;
	int ret;

	seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
	do {
		unsigned int write_len;
		u32 seg_length, seg_handle;
		u64 seg_offset;

		if (info->wi_seg_no >= info->wi_nsegs)
			goto out_overflow;

		seg_handle = be32_to_cpup(seg);
		seg_length = be32_to_cpup(seg + 1);
		xdr_decode_hyper(seg + 2, &seg_offset);
		seg_offset += info->wi_seg_off;

		write_len = min(remaining, seg_length - info->wi_seg_off);
		ctxt = svc_rdma_get_rw_ctxt(rdma,
					    (write_len >> PAGE_SHIFT) + 2);
		if (!ctxt)
			goto out_noctx;

		constructor(info, write_len, ctxt);
		ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
				       rdma->sc_port_num, ctxt->rw_sg_table.sgl,
				       ctxt->rw_nents, 0, seg_offset,
				       seg_handle, DMA_TO_DEVICE);
		if (ret < 0)
			goto out_initerr;

		trace_svcrdma_encode_wseg(seg_handle, write_len, seg_offset);
		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
		cc->cc_sqecount += ret;
		if (write_len == seg_length - info->wi_seg_off) {
			seg += 4;
			info->wi_seg_no++;
			info->wi_seg_off = 0;
		} else {
			info->wi_seg_off += write_len;
		}
		remaining -= write_len;
	} while (remaining);

	return 0;

out_overflow:
	dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
		info->wi_nsegs);
	return -E2BIG;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_initerr:
	svc_rdma_put_rw_ctxt(rdma, ctxt);
	trace_svcrdma_dma_map_rwctx(rdma, ret);
	return -EIO;
}
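/* Send one of an xdr_buf's kvecs by itself. To send a Reply
 * chunk, the whole RPC Reply is written back to the client.
 * This function writes either the head or tail of the xdr_buf.
 */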
static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
				  struct kvec *vec)
{
	info->wi_base = vec->iov_base;
	return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
				     vec->iov_len);
}
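/* Send an xdr_buf's page list by itself. A Write chunk is just
 * the page list. A Reply chunk is @xdr's head, page list, and
 * tail. This function is shared between the two types of chunk.
 */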
static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
				      struct xdr_buf *xdr)
{
	info->wi_xdr = xdr;
	info->wi_next_off = 0;
	return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
				     xdr->page_len);
}
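/**
 * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 * @rdma: controlling RDMA transport
 * @wr_ch: Write chunk provided by client
 * @xdr: xdr_buf containing the data payload
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Write chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */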
int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
			      struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int ret;

	if (!xdr->page_len)
		return 0;

	info = svc_rdma_write_info_alloc(rdma, wr_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_pagelist(info, xdr);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_encode_write(xdr->page_len);
	return xdr->page_len;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}
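/**
 * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 * @rdma: controlling RDMA transport
 * @rp_ch: Reply chunk provided by client
 * @writelist: true if client provided a Write list
 * @xdr: xdr_buf containing an RPC Reply
 *
 * Returns a non-negative number of bytes the chunk consumed, or
 *	%-E2BIG if the payload was larger than the Reply chunk,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */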
int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
			      bool writelist, struct xdr_buf *xdr)
{
	struct svc_rdma_write_info *info;
	int consumed, ret;

	info = svc_rdma_write_info_alloc(rdma, rp_ch);
	if (!info)
		return -ENOMEM;

	ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
	if (ret < 0)
		goto out_err;
	consumed = xdr->head[0].iov_len;

	/* Send the page list in the Reply chunk only if the
	 * client did not provide Write chunks.
	 */
	if (!writelist && xdr->page_len) {
		ret = svc_rdma_send_xdr_pagelist(info, xdr);
		if (ret < 0)
			goto out_err;
		consumed += xdr->page_len;
	}

	if (xdr->tail[0].iov_len) {
		ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
		if (ret < 0)
			goto out_err;
		consumed += xdr->tail[0].iov_len;
	}

	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
	if (ret < 0)
		goto out_err;

	trace_svcrdma_encode_reply(consumed);
	return consumed;

out_err:
	svc_rdma_write_info_free(info);
	return ret;
}
static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
				       struct svc_rqst *rqstp,
				       u32 rkey, u32 len, u64 offset)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
	struct svc_rdma_rw_ctxt *ctxt;
	unsigned int sge_no, seg_len;
	struct scatterlist *sg;
	int ret;

	sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
	ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
	if (!ctxt)
		goto out_noctx;
	ctxt->rw_nents = sge_no;

	sg = ctxt->rw_sg_table.sgl;
	for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
		seg_len = min_t(unsigned int, len,
				PAGE_SIZE - info->ri_pageoff);

		head->rc_arg.pages[info->ri_pageno] =
			rqstp->rq_pages[info->ri_pageno];
		if (!info->ri_pageoff)
			head->rc_page_count++;

		sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
			    seg_len, info->ri_pageoff);
		sg = sg_next(sg);

		info->ri_pageoff += seg_len;
		if (info->ri_pageoff == PAGE_SIZE) {
			info->ri_pageno++;
			info->ri_pageoff = 0;
		}
		len -= seg_len;

		/* Safety check */
		if (len &&
		    &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
			goto out_overrun;
	}

	ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
			       cc->cc_rdma->sc_port_num,
			       ctxt->rw_sg_table.sgl, ctxt->rw_nents,
			       0, offset, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		goto out_initerr;

	list_add(&ctxt->rw_list, &cc->cc_rwctxts);
	cc->cc_sqecount += ret;
	return 0;

out_noctx:
	dprintk("svcrdma: no R/W ctxs available\n");
	return -ENOMEM;

out_overrun:
	dprintk("svcrdma: request overruns rq_pages\n");
	return -EINVAL;

out_initerr:
	trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
	svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
	return -EIO;
}
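/* Walk the segments in the Read chunk starting at @p and construct
 * RDMA Read operations to pull the chunk to the server.
 */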
static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
				     struct svc_rdma_read_info *info,
				     __be32 *p)
{
	unsigned int i;
	int ret;

	ret = -EINVAL;
	info->ri_chunklen = 0;
	while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
		u32 rs_handle, rs_length;
		u64 rs_offset;

		rs_handle = be32_to_cpup(p++);
		rs_length = be32_to_cpup(p++);
		p = xdr_decode_hyper(p, &rs_offset);

		ret = svc_rdma_build_read_segment(info, rqstp,
						  rs_handle, rs_length,
						  rs_offset);
		if (ret < 0)
			break;

		trace_svcrdma_encode_rseg(rs_handle, rs_length, rs_offset);
		info->ri_chunklen += rs_length;
	}

	/* Pages under I/O have been copied to head->rc_arg.pages.
	 * Prevent their premature release by svc_xprt_release().
	 */
	for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
		rqstp->rq_pages[i] = NULL;

	return ret;
}
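/* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 * data lands in the page list of head->rc_arg.pages.
 *
 * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 * Therefore, XDR round-up of the Read chunk and trailing
 * inline content must both be added at the end of the pagelist.
 */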
static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
					    struct svc_rdma_read_info *info,
					    __be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_encode_read(info->ri_chunklen, info->ri_position);

	head->rc_hdr_count = 0;

	/* Split the Receive buffer between the head and tail
	 * buffers at Read chunk's position. XDR roundup of the
	 * chunk is not included in either the pagelist or in
	 * the tail.
	 */
	head->rc_arg.tail[0].iov_base =
		head->rc_arg.head[0].iov_base + info->ri_position;
	head->rc_arg.tail[0].iov_len =
		head->rc_arg.head[0].iov_len - info->ri_position;
	head->rc_arg.head[0].iov_len = info->ri_position;

	/* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
	 *
	 * If the client already rounded up the chunk length, the
	 * length does not change. Otherwise, the length of the page
	 * list is increased to include XDR round-up.
	 *
	 * Currently these chunks always start at page offset 0,
	 * thus the rounded-up length never crosses a page boundary.
	 */
	info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;

	head->rc_arg.page_len = info->ri_chunklen;
	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

out:
	return ret;
}
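/* Construct RDMA Reads to pull over a Position Zero Read chunk.
 * The start of the data lands in the first page just after
 * the Transport header, and the rest lands in the page list of
 * head->rc_arg.pages.
 *
 * Assumptions:
 *	- A PZRC has an XDR-aligned length (no implicit round-up).
 *	- There can be no trailing inline content (IOW, we assume
 *	  a PZRC is never sent in an RDMA_MSG message, though it's
 *	  allowed by spec).
 */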
static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
					struct svc_rdma_read_info *info,
					__be32 *p)
{
	struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
	int ret;

	ret = svc_rdma_build_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out;

	trace_svcrdma_encode_pzr(info->ri_chunklen);

	head->rc_arg.len += info->ri_chunklen;
	head->rc_arg.buflen += info->ri_chunklen;

	head->rc_hdr_count = 1;
	head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
	head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
					     info->ri_chunklen);

	head->rc_arg.page_len = info->ri_chunklen -
				head->rc_arg.head[0].iov_len;

out:
	return ret;
}
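/**
 * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 * @rdma: controlling RDMA transport
 * @rqstp: set of pages to use as Read sink buffers
 * @head: pages under I/O collect here
 * @p: pointer to start of Read chunk
 *
 * Returns:
 *	%0 if all needed RDMA Reads were posted successfully,
 *	%-EINVAL if client provided too many segments,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 */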
int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
			     struct svc_rdma_recv_ctxt *head, __be32 *p)
{
	struct svc_rdma_read_info *info;
	int ret;

	/* The request (with page list) is constructed in
	 * head->rc_arg. Finish initializing head->rc_arg before
	 * branching below.
	 */
	head->rc_arg.head[0] = rqstp->rq_arg.head[0];
	head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
	head->rc_arg.pages = head->rc_pages;
	head->rc_arg.page_base = 0;
	head->rc_arg.page_len = 0;
	head->rc_arg.len = rqstp->rq_arg.len;
	head->rc_arg.buflen = rqstp->rq_arg.buflen;

	info = svc_rdma_read_info_alloc(rdma);
	if (!info)
		return -ENOMEM;
	info->ri_readctxt = head;
	info->ri_pageno = 0;
	info->ri_pageoff = 0;

	info->ri_position = be32_to_cpup(p + 1);
	if (info->ri_position)
		ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
	else
		ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
	if (ret < 0)
		goto out_err;

	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
	if (ret < 0)
		goto out_err;
	return 0;

out_err:
	svc_rdma_read_info_free(info);
	return ret;
}