net/sunrpc/xprtrdma/svc_rdma_rw.c


DEFINITIONS

This source file includes the following definitions:
  1. svc_rdma_next_ctxt
  2. svc_rdma_get_rw_ctxt
  3. svc_rdma_put_rw_ctxt
  4. svc_rdma_destroy_rw_ctxts
  5. svc_rdma_cc_init
  6. svc_rdma_cc_release
  7. svc_rdma_write_info_alloc
  8. svc_rdma_write_info_free
  9. svc_rdma_write_done
  10. svc_rdma_read_info_alloc
  11. svc_rdma_read_info_free
  12. svc_rdma_wc_read_done
  13. svc_rdma_post_chunk_ctxt
  14. svc_rdma_vec_to_sg
  15. svc_rdma_pagelist_to_sg
  16. svc_rdma_build_writes
  17. svc_rdma_send_xdr_kvec
  18. svc_rdma_send_xdr_pagelist
  19. svc_rdma_send_write_chunk
  20. svc_rdma_send_reply_chunk
  21. svc_rdma_build_read_segment
  22. svc_rdma_build_read_chunk
  23. svc_rdma_build_normal_read_chunk
  24. svc_rdma_build_pz_read_chunk
  25. svc_rdma_recv_read_chunk

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (c) 2016-2018 Oracle.  All rights reserved.
   4  *
   5  * Use the core R/W API to move RPC-over-RDMA Read and Write chunks.
   6  */
   7 
   8 #include <rdma/rw.h>
   9 
  10 #include <linux/sunrpc/rpc_rdma.h>
  11 #include <linux/sunrpc/svc_rdma.h>
  12 #include <linux/sunrpc/debug.h>
  13 
  14 #include "xprt_rdma.h"
  15 #include <trace/events/rpcrdma.h>
  16 
  17 #define RPCDBG_FACILITY RPCDBG_SVCXPRT
  18 
  19 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
  20 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
  21 
  22 /* Each R/W context contains state for one chain of RDMA Read or
  23  * Write Work Requests.
  24  *
  25  * Each WR chain handles a single contiguous server-side buffer,
  26  * because scatterlist entries after the first have to start on
  27  * page alignment. xdr_buf iovecs cannot guarantee alignment.
  28  *
  29  * Each WR chain handles only one R_key. Each RPC-over-RDMA segment
  30  * from a client may contain a unique R_key, so each WR chain moves
  31  * up to one segment at a time.
  32  *
  33  * The scatterlist makes this data structure over 4KB in size. To
  34  * make it less likely to fail, and to handle the allocation for
  35  * smaller I/O requests without disabling bottom-halves, these
  36  * contexts are created on demand, but cached and reused until the
  37  * controlling svcxprt_rdma is destroyed.
  38  */
  39 struct svc_rdma_rw_ctxt {
  40         struct list_head        rw_list;
  41         struct rdma_rw_ctx      rw_ctx;
  42         int                     rw_nents;
  43         struct sg_table         rw_sg_table;
  44         struct scatterlist      rw_first_sgl[0];
  45 };
  46 
  47 static inline struct svc_rdma_rw_ctxt *
  48 svc_rdma_next_ctxt(struct list_head *list)
  49 {
  50         return list_first_entry_or_null(list, struct svc_rdma_rw_ctxt,
  51                                         rw_list);
  52 }
  53 
  54 static struct svc_rdma_rw_ctxt *
  55 svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)
  56 {
  57         struct svc_rdma_rw_ctxt *ctxt;
  58 
  59         spin_lock(&rdma->sc_rw_ctxt_lock);
  60 
  61         ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts);
  62         if (ctxt) {
  63                 list_del(&ctxt->rw_list);
  64                 spin_unlock(&rdma->sc_rw_ctxt_lock);
  65         } else {
  66                 spin_unlock(&rdma->sc_rw_ctxt_lock);
  67                 ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE),
  68                                GFP_KERNEL);
  69                 if (!ctxt)
  70                         goto out;
  71                 INIT_LIST_HEAD(&ctxt->rw_list);
  72         }
  73 
  74         ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
  75         if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
  76                                    ctxt->rw_sg_table.sgl,
  77                                    SG_CHUNK_SIZE)) {
  78                 kfree(ctxt);
  79                 ctxt = NULL;
  80         }
  81 out:
  82         return ctxt;
  83 }
  84 
  85 static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,
  86                                  struct svc_rdma_rw_ctxt *ctxt)
  87 {
  88         sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE);
  89 
  90         spin_lock(&rdma->sc_rw_ctxt_lock);
  91         list_add(&ctxt->rw_list, &rdma->sc_rw_ctxts);
  92         spin_unlock(&rdma->sc_rw_ctxt_lock);
  93 }
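
The two helpers above give svc_rdma_rw_ctxt a simple free-list lifecycle. A minimal caller-side sketch, with the wrapper name and error handling assumed for illustration (real users go through the chunk-context code below):

/* Illustrative only: reserve a context sized for nr_sges scatterlist
 * entries, fill its SGL, then recycle it onto the transport's free list.
 */
static int example_rw_ctxt_lifecycle(struct svcxprt_rdma *rdma,
                                     unsigned int nr_sges)
{
        struct svc_rdma_rw_ctxt *ctxt;

        ctxt = svc_rdma_get_rw_ctxt(rdma, nr_sges);
        if (!ctxt)
                return -ENOMEM;

        /* ... populate ctxt->rw_sg_table.sgl and set ctxt->rw_nents ... */

        svc_rdma_put_rw_ctxt(rdma, ctxt);  /* frees the chained SGL, caches ctxt */
        return 0;
}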
  94 
  95 /**
  96  * svc_rdma_destroy_rw_ctxts - Free accumulated R/W contexts
  97  * @rdma: transport about to be destroyed
  98  *
  99  */
 100 void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)
 101 {
 102         struct svc_rdma_rw_ctxt *ctxt;
 103 
 104         while ((ctxt = svc_rdma_next_ctxt(&rdma->sc_rw_ctxts)) != NULL) {
 105                 list_del(&ctxt->rw_list);
 106                 kfree(ctxt);
 107         }
 108 }
 109 
 110 /* A chunk context tracks all I/O for moving one Read or Write
  111  * chunk. This is a set of rdma_rw's that handle data movement
 112  * for all segments of one chunk.
 113  *
 114  * These are small, acquired with a single allocator call, and
 115  * no more than one is needed per chunk. They are allocated on
 116  * demand, and not cached.
 117  */
 118 struct svc_rdma_chunk_ctxt {
 119         struct ib_cqe           cc_cqe;
 120         struct svcxprt_rdma     *cc_rdma;
 121         struct list_head        cc_rwctxts;
 122         int                     cc_sqecount;
 123 };
 124 
 125 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
 126                              struct svc_rdma_chunk_ctxt *cc)
 127 {
 128         cc->cc_rdma = rdma;
 129         svc_xprt_get(&rdma->sc_xprt);
 130 
 131         INIT_LIST_HEAD(&cc->cc_rwctxts);
 132         cc->cc_sqecount = 0;
 133 }
 134 
 135 static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc,
 136                                 enum dma_data_direction dir)
 137 {
 138         struct svcxprt_rdma *rdma = cc->cc_rdma;
 139         struct svc_rdma_rw_ctxt *ctxt;
 140 
 141         while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) {
 142                 list_del(&ctxt->rw_list);
 143 
 144                 rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp,
 145                                     rdma->sc_port_num, ctxt->rw_sg_table.sgl,
 146                                     ctxt->rw_nents, dir);
 147                 svc_rdma_put_rw_ctxt(rdma, ctxt);
 148         }
 149         svc_xprt_put(&rdma->sc_xprt);
 150 }
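
The chunk context ties the pieces in this file together. A condensed view of its lifecycle, drawn from the Write and Read paths that follow:

/* Canonical svc_rdma_chunk_ctxt lifecycle (as used below):
 *
 *   svc_rdma_cc_init(rdma, cc);
 *   ... per segment: svc_rdma_get_rw_ctxt() + rdma_rw_ctx_init(),
 *       list_add(&ctxt->rw_list, &cc->cc_rwctxts),
 *       cc->cc_sqecount += ret; ...
 *   svc_rdma_post_chunk_ctxt(cc);
 *   ... cc->cc_cqe.done runs on completion ...
 *   svc_rdma_cc_release(cc, dir);   unmaps and recycles every rw_ctxt
 */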
 151 
 152 /* State for sending a Write or Reply chunk.
 153  *  - Tracks progress of writing one chunk over all its segments
 154  *  - Stores arguments for the SGL constructor functions
 155  */
 156 struct svc_rdma_write_info {
 157         /* write state of this chunk */
 158         unsigned int            wi_seg_off;
 159         unsigned int            wi_seg_no;
 160         unsigned int            wi_nsegs;
 161         __be32                  *wi_segs;
 162 
 163         /* SGL constructor arguments */
 164         struct xdr_buf          *wi_xdr;
 165         unsigned char           *wi_base;
 166         unsigned int            wi_next_off;
 167 
 168         struct svc_rdma_chunk_ctxt      wi_cc;
 169 };
 170 
 171 static struct svc_rdma_write_info *
 172 svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, __be32 *chunk)
 173 {
 174         struct svc_rdma_write_info *info;
 175 
 176         info = kmalloc(sizeof(*info), GFP_KERNEL);
 177         if (!info)
 178                 return info;
 179 
 180         info->wi_seg_off = 0;
 181         info->wi_seg_no = 0;
 182         info->wi_nsegs = be32_to_cpup(++chunk);
 183         info->wi_segs = ++chunk;
 184         svc_rdma_cc_init(rdma, &info->wi_cc);
 185         info->wi_cc.cc_cqe.done = svc_rdma_write_done;
 186         return info;
 187 }
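
The pointer arithmetic above relies on the RPC-over-RDMA (RFC 8166) Write list layout: @chunk points at the list-entry discriminator, the next XDR word is the segment count, and fixed-size segments follow. A decoding sketch for one segment, mirroring svc_rdma_build_writes() below (the helper name is illustrative):

/* Wire words consumed by svc_rdma_write_info_alloc():
 *
 *   chunk[0]    list-entry discriminator
 *   chunk[1]    segment count          -> wi_nsegs
 *   chunk[2..]  wi_nsegs segments      -> wi_segs
 *
 * Each segment is rpcrdma_segment_maxsz (4) XDR words.
 */
static void example_decode_wseg(__be32 *seg)
{
        u32 handle = be32_to_cpup(seg);         /* R_key */
        u32 length = be32_to_cpup(seg + 1);     /* bytes */
        u64 offset;

        xdr_decode_hyper(seg + 2, &offset);     /* remote address */
        (void)handle; (void)length; (void)offset;
}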
 188 
 189 static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
 190 {
 191         svc_rdma_cc_release(&info->wi_cc, DMA_TO_DEVICE);
 192         kfree(info);
 193 }
 194 
 195 /**
 196  * svc_rdma_write_done - Write chunk completion
 197  * @cq: controlling Completion Queue
 198  * @wc: Work Completion
 199  *
 200  * Pages under I/O are freed by a subsequent Send completion.
 201  */
 202 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
 203 {
 204         struct ib_cqe *cqe = wc->wr_cqe;
 205         struct svc_rdma_chunk_ctxt *cc =
 206                         container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
 207         struct svcxprt_rdma *rdma = cc->cc_rdma;
 208         struct svc_rdma_write_info *info =
 209                         container_of(cc, struct svc_rdma_write_info, wi_cc);
 210 
 211         trace_svcrdma_wc_write(wc);
 212 
 213         atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 214         wake_up(&rdma->sc_send_wait);
 215 
 216         if (unlikely(wc->status != IB_WC_SUCCESS))
 217                 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
 218 
 219         svc_rdma_write_info_free(info);
 220 }
 221 
 222 /* State for pulling a Read chunk.
 223  */
 224 struct svc_rdma_read_info {
 225         struct svc_rdma_recv_ctxt       *ri_readctxt;
 226         unsigned int                    ri_position;
 227         unsigned int                    ri_pageno;
 228         unsigned int                    ri_pageoff;
 229         unsigned int                    ri_chunklen;
 230 
 231         struct svc_rdma_chunk_ctxt      ri_cc;
 232 };
 233 
 234 static struct svc_rdma_read_info *
 235 svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma)
 236 {
 237         struct svc_rdma_read_info *info;
 238 
 239         info = kmalloc(sizeof(*info), GFP_KERNEL);
 240         if (!info)
 241                 return info;
 242 
 243         svc_rdma_cc_init(rdma, &info->ri_cc);
 244         info->ri_cc.cc_cqe.done = svc_rdma_wc_read_done;
 245         return info;
 246 }
 247 
 248 static void svc_rdma_read_info_free(struct svc_rdma_read_info *info)
 249 {
 250         svc_rdma_cc_release(&info->ri_cc, DMA_FROM_DEVICE);
 251         kfree(info);
 252 }
 253 
 254 /**
 255  * svc_rdma_wc_read_done - Handle completion of an RDMA Read ctx
 256  * @cq: controlling Completion Queue
 257  * @wc: Work Completion
 258  *
 259  */
 260 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
 261 {
 262         struct ib_cqe *cqe = wc->wr_cqe;
 263         struct svc_rdma_chunk_ctxt *cc =
 264                         container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
 265         struct svcxprt_rdma *rdma = cc->cc_rdma;
 266         struct svc_rdma_read_info *info =
 267                         container_of(cc, struct svc_rdma_read_info, ri_cc);
 268 
 269         trace_svcrdma_wc_read(wc);
 270 
 271         atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 272         wake_up(&rdma->sc_send_wait);
 273 
 274         if (unlikely(wc->status != IB_WC_SUCCESS)) {
 275                 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
 276                 svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
 277         } else {
 278                 spin_lock(&rdma->sc_rq_dto_lock);
 279                 list_add_tail(&info->ri_readctxt->rc_list,
 280                               &rdma->sc_read_complete_q);
 281                 /* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
 282                 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
 283                 spin_unlock(&rdma->sc_rq_dto_lock);
 284 
 285                 svc_xprt_enqueue(&rdma->sc_xprt);
 286         }
 287 
 288         svc_rdma_read_info_free(info);
 289 }
 290 
 291 /* This function sleeps when the transport's Send Queue is congested.
 292  *
 293  * Assumptions:
 294  * - If ib_post_send() succeeds, only one completion is expected,
 295  *   even if one or more WRs are flushed. This is true when posting
 296  *   an rdma_rw_ctx or when posting a single signaled WR.
 297  */
 298 static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
 299 {
 300         struct svcxprt_rdma *rdma = cc->cc_rdma;
 301         struct svc_xprt *xprt = &rdma->sc_xprt;
 302         struct ib_send_wr *first_wr;
 303         const struct ib_send_wr *bad_wr;
 304         struct list_head *tmp;
 305         struct ib_cqe *cqe;
 306         int ret;
 307 
 308         if (cc->cc_sqecount > rdma->sc_sq_depth)
 309                 return -EINVAL;
 310 
 311         first_wr = NULL;
 312         cqe = &cc->cc_cqe;
 313         list_for_each(tmp, &cc->cc_rwctxts) {
 314                 struct svc_rdma_rw_ctxt *ctxt;
 315 
 316                 ctxt = list_entry(tmp, struct svc_rdma_rw_ctxt, rw_list);
 317                 first_wr = rdma_rw_ctx_wrs(&ctxt->rw_ctx, rdma->sc_qp,
 318                                            rdma->sc_port_num, cqe, first_wr);
 319                 cqe = NULL;
 320         }
 321 
 322         do {
 323                 if (atomic_sub_return(cc->cc_sqecount,
 324                                       &rdma->sc_sq_avail) > 0) {
 325                         ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
 326                         if (ret)
 327                                 break;
 328                         return 0;
 329                 }
 330 
 331                 trace_svcrdma_sq_full(rdma);
 332                 atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 333                 wait_event(rdma->sc_send_wait,
 334                            atomic_read(&rdma->sc_sq_avail) > cc->cc_sqecount);
 335                 trace_svcrdma_sq_retry(rdma);
 336         } while (1);
 337 
 338         trace_svcrdma_sq_post_err(rdma, ret);
 339         set_bit(XPT_CLOSE, &xprt->xpt_flags);
 340 
 341         /* If even one was posted, there will be a completion. */
 342         if (bad_wr != first_wr)
 343                 return 0;
 344 
 345         atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
 346         wake_up(&rdma->sc_send_wait);
 347         return -ENOTCONN;
 348 }
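
Send Queue flow control in svc_rdma_post_chunk_ctxt() reserves cc_sqecount SQEs speculatively and backs the reservation out when the queue is congested. A worked pass with illustrative numbers:

/* Example (illustrative figures): sc_sq_avail = 10, cc_sqecount = 16
 *
 *   atomic_sub_return(16, &sc_sq_avail) -> -6   not > 0: SQ too full
 *   atomic_add(16, &sc_sq_avail)        -> 10   reservation returned
 *   wait_event(sc_send_wait, sc_sq_avail > 16)  sleep for completions
 *   atomic_sub_return(16, &sc_sq_avail) -> e.g. 9 > 0: ib_post_send()
 *
 * The reserved SQEs are released again by the completion handler
 * (svc_rdma_write_done or svc_rdma_wc_read_done) or by the error
 * paths above.
 */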
 349 
 350 /* Build and DMA-map an SGL that covers one kvec in an xdr_buf
 351  */
 352 static void svc_rdma_vec_to_sg(struct svc_rdma_write_info *info,
 353                                unsigned int len,
 354                                struct svc_rdma_rw_ctxt *ctxt)
 355 {
 356         struct scatterlist *sg = ctxt->rw_sg_table.sgl;
 357 
 358         sg_set_buf(&sg[0], info->wi_base, len);
 359         info->wi_base += len;
 360 
 361         ctxt->rw_nents = 1;
 362 }
 363 
 364 /* Build and DMA-map an SGL that covers part of an xdr_buf's pagelist.
 365  */
 366 static void svc_rdma_pagelist_to_sg(struct svc_rdma_write_info *info,
 367                                     unsigned int remaining,
 368                                     struct svc_rdma_rw_ctxt *ctxt)
 369 {
 370         unsigned int sge_no, sge_bytes, page_off, page_no;
 371         struct xdr_buf *xdr = info->wi_xdr;
 372         struct scatterlist *sg;
 373         struct page **page;
 374 
 375         page_off = info->wi_next_off + xdr->page_base;
 376         page_no = page_off >> PAGE_SHIFT;
 377         page_off = offset_in_page(page_off);
 378         page = xdr->pages + page_no;
 379         info->wi_next_off += remaining;
 380         sg = ctxt->rw_sg_table.sgl;
 381         sge_no = 0;
 382         do {
 383                 sge_bytes = min_t(unsigned int, remaining,
 384                                   PAGE_SIZE - page_off);
 385                 sg_set_page(sg, *page, sge_bytes, page_off);
 386 
 387                 remaining -= sge_bytes;
 388                 sg = sg_next(sg);
 389                 page_off = 0;
 390                 sge_no++;
 391                 page++;
 392         } while (remaining);
 393 
 394         ctxt->rw_nents = sge_no;
 395 }
 396 
 397 /* Construct RDMA Write WRs to send a portion of an xdr_buf containing
 398  * an RPC Reply.
 399  */
 400 static int
 401 svc_rdma_build_writes(struct svc_rdma_write_info *info,
 402                       void (*constructor)(struct svc_rdma_write_info *info,
 403                                           unsigned int len,
 404                                           struct svc_rdma_rw_ctxt *ctxt),
 405                       unsigned int remaining)
 406 {
 407         struct svc_rdma_chunk_ctxt *cc = &info->wi_cc;
 408         struct svcxprt_rdma *rdma = cc->cc_rdma;
 409         struct svc_rdma_rw_ctxt *ctxt;
 410         __be32 *seg;
 411         int ret;
 412 
 413         seg = info->wi_segs + info->wi_seg_no * rpcrdma_segment_maxsz;
 414         do {
 415                 unsigned int write_len;
 416                 u32 seg_length, seg_handle;
 417                 u64 seg_offset;
 418 
 419                 if (info->wi_seg_no >= info->wi_nsegs)
 420                         goto out_overflow;
 421 
 422                 seg_handle = be32_to_cpup(seg);
 423                 seg_length = be32_to_cpup(seg + 1);
 424                 xdr_decode_hyper(seg + 2, &seg_offset);
 425                 seg_offset += info->wi_seg_off;
 426 
 427                 write_len = min(remaining, seg_length - info->wi_seg_off);
 428                 ctxt = svc_rdma_get_rw_ctxt(rdma,
 429                                             (write_len >> PAGE_SHIFT) + 2);
 430                 if (!ctxt)
 431                         goto out_noctx;
 432 
 433                 constructor(info, write_len, ctxt);
 434                 ret = rdma_rw_ctx_init(&ctxt->rw_ctx, rdma->sc_qp,
 435                                        rdma->sc_port_num, ctxt->rw_sg_table.sgl,
 436                                        ctxt->rw_nents, 0, seg_offset,
 437                                        seg_handle, DMA_TO_DEVICE);
 438                 if (ret < 0)
 439                         goto out_initerr;
 440 
 441                 trace_svcrdma_encode_wseg(seg_handle, write_len, seg_offset);
 442                 list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 443                 cc->cc_sqecount += ret;
 444                 if (write_len == seg_length - info->wi_seg_off) {
 445                         seg += 4;
 446                         info->wi_seg_no++;
 447                         info->wi_seg_off = 0;
 448                 } else {
 449                         info->wi_seg_off += write_len;
 450                 }
 451                 remaining -= write_len;
 452         } while (remaining);
 453 
 454         return 0;
 455 
 456 out_overflow:
 457         dprintk("svcrdma: inadequate space in Write chunk (%u)\n",
 458                 info->wi_nsegs);
 459         return -E2BIG;
 460 
 461 out_noctx:
 462         dprintk("svcrdma: no R/W ctxs available\n");
 463         return -ENOMEM;
 464 
 465 out_initerr:
 466         svc_rdma_put_rw_ctxt(rdma, ctxt);
 467         trace_svcrdma_dma_map_rwctx(rdma, ret);
 468         return -EIO;
 469 }
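
The SGE estimate passed to svc_rdma_get_rw_ctxt() above, (write_len >> PAGE_SHIFT) + 2, is a worst case: all full pages covered by write_len plus a partial page at each end. Worked with illustrative numbers:

/* write_len = 9000, PAGE_SIZE = 4096, payload starts at page offset 3000:
 *
 *   pages actually touched: bytes [3000..12000) span 3 pages
 *   estimate: (9000 >> PAGE_SHIFT) + 2 = 2 + 2 = 4 >= 3
 *
 * The estimate can never be too small, so sg_alloc_table_chained()
 * always provides enough entries for svc_rdma_vec_to_sg() or
 * svc_rdma_pagelist_to_sg() to fill.
 */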
 470 
 471 /* Send one of an xdr_buf's kvecs by itself. To send a Reply
 472  * chunk, the whole RPC Reply is written back to the client.
 473  * This function writes either the head or tail of the xdr_buf
 474  * containing the Reply.
 475  */
 476 static int svc_rdma_send_xdr_kvec(struct svc_rdma_write_info *info,
 477                                   struct kvec *vec)
 478 {
 479         info->wi_base = vec->iov_base;
 480         return svc_rdma_build_writes(info, svc_rdma_vec_to_sg,
 481                                      vec->iov_len);
 482 }
 483 
 484 /* Send an xdr_buf's page list by itself. A Write chunk is
  485  * just the page list. A Reply chunk is the head, page list,
 486  * and tail. This function is shared between the two types
 487  * of chunk.
 488  */
 489 static int svc_rdma_send_xdr_pagelist(struct svc_rdma_write_info *info,
 490                                       struct xdr_buf *xdr)
 491 {
 492         info->wi_xdr = xdr;
 493         info->wi_next_off = 0;
 494         return svc_rdma_build_writes(info, svc_rdma_pagelist_to_sg,
 495                                      xdr->page_len);
 496 }
 497 
 498 /**
 499  * svc_rdma_send_write_chunk - Write all segments in a Write chunk
 500  * @rdma: controlling RDMA transport
 501  * @wr_ch: Write chunk provided by client
 502  * @xdr: xdr_buf containing the data payload
 503  *
 504  * Returns a non-negative number of bytes the chunk consumed, or
 505  *      %-E2BIG if the payload was larger than the Write chunk,
 506  *      %-EINVAL if client provided too many segments,
 507  *      %-ENOMEM if rdma_rw context pool was exhausted,
 508  *      %-ENOTCONN if posting failed (connection is lost),
 509  *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 510  */
 511 int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma, __be32 *wr_ch,
 512                               struct xdr_buf *xdr)
 513 {
 514         struct svc_rdma_write_info *info;
 515         int ret;
 516 
 517         if (!xdr->page_len)
 518                 return 0;
 519 
 520         info = svc_rdma_write_info_alloc(rdma, wr_ch);
 521         if (!info)
 522                 return -ENOMEM;
 523 
 524         ret = svc_rdma_send_xdr_pagelist(info, xdr);
 525         if (ret < 0)
 526                 goto out_err;
 527 
 528         ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
 529         if (ret < 0)
 530                 goto out_err;
 531 
 532         trace_svcrdma_encode_write(xdr->page_len);
 533         return xdr->page_len;
 534 
 535 out_err:
 536         svc_rdma_write_info_free(info);
 537         return ret;
 538 }
 539 
 540 /**
 541  * svc_rdma_send_reply_chunk - Write all segments in the Reply chunk
 542  * @rdma: controlling RDMA transport
 543  * @rp_ch: Reply chunk provided by client
 544  * @writelist: true if client provided a Write list
 545  * @xdr: xdr_buf containing an RPC Reply
 546  *
 547  * Returns a non-negative number of bytes the chunk consumed, or
 548  *      %-E2BIG if the payload was larger than the Reply chunk,
 549  *      %-EINVAL if client provided too many segments,
 550  *      %-ENOMEM if rdma_rw context pool was exhausted,
 551  *      %-ENOTCONN if posting failed (connection is lost),
 552  *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 553  */
 554 int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma, __be32 *rp_ch,
 555                               bool writelist, struct xdr_buf *xdr)
 556 {
 557         struct svc_rdma_write_info *info;
 558         int consumed, ret;
 559 
 560         info = svc_rdma_write_info_alloc(rdma, rp_ch);
 561         if (!info)
 562                 return -ENOMEM;
 563 
 564         ret = svc_rdma_send_xdr_kvec(info, &xdr->head[0]);
 565         if (ret < 0)
 566                 goto out_err;
 567         consumed = xdr->head[0].iov_len;
 568 
 569         /* Send the page list in the Reply chunk only if the
 570          * client did not provide Write chunks.
 571          */
 572         if (!writelist && xdr->page_len) {
 573                 ret = svc_rdma_send_xdr_pagelist(info, xdr);
 574                 if (ret < 0)
 575                         goto out_err;
 576                 consumed += xdr->page_len;
 577         }
 578 
 579         if (xdr->tail[0].iov_len) {
 580                 ret = svc_rdma_send_xdr_kvec(info, &xdr->tail[0]);
 581                 if (ret < 0)
 582                         goto out_err;
 583                 consumed += xdr->tail[0].iov_len;
 584         }
 585 
 586         ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
 587         if (ret < 0)
 588                 goto out_err;
 589 
 590         trace_svcrdma_encode_reply(consumed);
 591         return consumed;
 592 
 593 out_err:
 594         svc_rdma_write_info_free(info);
 595         return ret;
 596 }
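
A hedged sketch of how a send path can combine the two exported entry points above; the wrapper name and its error handling are assumptions, only the two functions and their return conventions come from this file:

/* Illustrative only: push the payload through client-provided chunks. */
static int example_send_chunks(struct svcxprt_rdma *rdma, __be32 *wr_lst,
                               __be32 *rp_ch, struct xdr_buf *xdr)
{
        int ret;

        if (wr_lst) {
                ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr);
                if (ret < 0)
                        return ret;
                /* ret bytes of xdr->pages were moved via the Write chunk */
        }
        if (rp_ch) {
                ret = svc_rdma_send_reply_chunk(rdma, rp_ch,
                                                wr_lst != NULL, xdr);
                if (ret < 0)
                        return ret;
                /* ret bytes of the Reply were moved via the Reply chunk */
        }
        return 0;
}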
 597 
 598 static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
 599                                        struct svc_rqst *rqstp,
 600                                        u32 rkey, u32 len, u64 offset)
 601 {
 602         struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
 603         struct svc_rdma_chunk_ctxt *cc = &info->ri_cc;
 604         struct svc_rdma_rw_ctxt *ctxt;
 605         unsigned int sge_no, seg_len;
 606         struct scatterlist *sg;
 607         int ret;
 608 
 609         sge_no = PAGE_ALIGN(info->ri_pageoff + len) >> PAGE_SHIFT;
 610         ctxt = svc_rdma_get_rw_ctxt(cc->cc_rdma, sge_no);
 611         if (!ctxt)
 612                 goto out_noctx;
 613         ctxt->rw_nents = sge_no;
 614 
 615         sg = ctxt->rw_sg_table.sgl;
 616         for (sge_no = 0; sge_no < ctxt->rw_nents; sge_no++) {
 617                 seg_len = min_t(unsigned int, len,
 618                                 PAGE_SIZE - info->ri_pageoff);
 619 
 620                 head->rc_arg.pages[info->ri_pageno] =
 621                         rqstp->rq_pages[info->ri_pageno];
 622                 if (!info->ri_pageoff)
 623                         head->rc_page_count++;
 624 
 625                 sg_set_page(sg, rqstp->rq_pages[info->ri_pageno],
 626                             seg_len, info->ri_pageoff);
 627                 sg = sg_next(sg);
 628 
 629                 info->ri_pageoff += seg_len;
 630                 if (info->ri_pageoff == PAGE_SIZE) {
 631                         info->ri_pageno++;
 632                         info->ri_pageoff = 0;
 633                 }
 634                 len -= seg_len;
 635 
 636                 /* Safety check */
 637                 if (len &&
 638                     &rqstp->rq_pages[info->ri_pageno + 1] > rqstp->rq_page_end)
 639                         goto out_overrun;
 640         }
 641 
 642         ret = rdma_rw_ctx_init(&ctxt->rw_ctx, cc->cc_rdma->sc_qp,
 643                                cc->cc_rdma->sc_port_num,
 644                                ctxt->rw_sg_table.sgl, ctxt->rw_nents,
 645                                0, offset, rkey, DMA_FROM_DEVICE);
 646         if (ret < 0)
 647                 goto out_initerr;
 648 
 649         list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 650         cc->cc_sqecount += ret;
 651         return 0;
 652 
 653 out_noctx:
 654         dprintk("svcrdma: no R/W ctxs available\n");
 655         return -ENOMEM;
 656 
 657 out_overrun:
 658         dprintk("svcrdma: request overruns rq_pages\n");
 659         return -EINVAL;
 660 
 661 out_initerr:
 662         trace_svcrdma_dma_map_rwctx(cc->cc_rdma, ret);
 663         svc_rdma_put_rw_ctxt(cc->cc_rdma, ctxt);
 664         return -EIO;
 665 }
 666 
 667 /* Walk the segments in the Read chunk starting at @p and construct
 668  * RDMA Read operations to pull the chunk to the server.
 669  */
 670 static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
 671                                      struct svc_rdma_read_info *info,
 672                                      __be32 *p)
 673 {
 674         unsigned int i;
 675         int ret;
 676 
 677         ret = -EINVAL;
 678         info->ri_chunklen = 0;
 679         while (*p++ != xdr_zero && be32_to_cpup(p++) == info->ri_position) {
 680                 u32 rs_handle, rs_length;
 681                 u64 rs_offset;
 682 
 683                 rs_handle = be32_to_cpup(p++);
 684                 rs_length = be32_to_cpup(p++);
 685                 p = xdr_decode_hyper(p, &rs_offset);
 686 
 687                 ret = svc_rdma_build_read_segment(info, rqstp,
 688                                                   rs_handle, rs_length,
 689                                                   rs_offset);
 690                 if (ret < 0)
 691                         break;
 692 
 693                 trace_svcrdma_encode_rseg(rs_handle, rs_length, rs_offset);
 694                 info->ri_chunklen += rs_length;
 695         }
 696 
 697         /* Pages under I/O have been copied to head->rc_pages.
  698  * Prevent their premature release by svc_xprt_release().
 699          */
 700         for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
 701                 rqstp->rq_pages[i] = NULL;
 702 
 703         return ret;
 704 }
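
The while loop above consumes Read list entries in the RFC 8166 wire format; each iteration advances @p over six XDR words:

/* Read list entry layout walked by svc_rdma_build_read_chunk():
 *
 *   p[0]    discriminator (xdr_zero terminates the list)
 *   p[1]    Position: byte offset of the chunk in the RPC's XDR stream
 *   p[2]    handle (R_key)             -> rs_handle
 *   p[3]    segment length in bytes    -> rs_length
 *   p[4-5]  64-bit remote offset       -> rs_offset
 *
 * The loop stops at the first entry whose Position differs from
 * ri_position, so a single call pulls exactly one chunk.
 */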
 705 
 706 /* Construct RDMA Reads to pull over a normal Read chunk. The chunk
 707  * data lands in the page list of head->rc_arg.pages.
 708  *
 709  * Currently NFSD does not look at the head->rc_arg.tail[0] iovec.
 710  * Therefore, XDR round-up of the Read chunk and trailing
 711  * inline content must both be added at the end of the pagelist.
 712  */
 713 static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
 714                                             struct svc_rdma_read_info *info,
 715                                             __be32 *p)
 716 {
 717         struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
 718         int ret;
 719 
 720         ret = svc_rdma_build_read_chunk(rqstp, info, p);
 721         if (ret < 0)
 722                 goto out;
 723 
 724         trace_svcrdma_encode_read(info->ri_chunklen, info->ri_position);
 725 
 726         head->rc_hdr_count = 0;
 727 
 728         /* Split the Receive buffer between the head and tail
 729          * buffers at Read chunk's position. XDR roundup of the
 730          * chunk is not included in either the pagelist or in
 731          * the tail.
 732          */
 733         head->rc_arg.tail[0].iov_base =
 734                 head->rc_arg.head[0].iov_base + info->ri_position;
 735         head->rc_arg.tail[0].iov_len =
 736                 head->rc_arg.head[0].iov_len - info->ri_position;
 737         head->rc_arg.head[0].iov_len = info->ri_position;
 738 
 739         /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
 740          *
 741          * If the client already rounded up the chunk length, the
 742          * length does not change. Otherwise, the length of the page
 743          * list is increased to include XDR round-up.
 744          *
 745          * Currently these chunks always start at page offset 0,
 746          * thus the rounded-up length never crosses a page boundary.
 747          */
 748         info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;
 749 
 750         head->rc_arg.page_len = info->ri_chunklen;
 751         head->rc_arg.len += info->ri_chunklen;
 752         head->rc_arg.buflen += info->ri_chunklen;
 753 
 754 out:
 755         return ret;
 756 }
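
The head/tail split above is easiest to see with concrete, made-up numbers: a 120-byte inline head, a Read chunk at Position 96, and a 513-byte chunk payload.

/* Before:  head[0].iov_len = 120, ri_position = 96, ri_chunklen = 513
 *
 * After:   head[0].iov_len  = 96                      inline before the chunk
 *          tail[0].iov_base = head base + 96
 *          tail[0].iov_len  = 120 - 96 = 24            inline after the chunk
 *          ri_chunklen      = XDR_QUADLEN(513) << 2 = 516   XDR round-up
 *          rc_arg.page_len  = 516                      chunk payload in pages
 */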
 757 
 758 /* Construct RDMA Reads to pull over a Position Zero Read chunk.
 759  * The start of the data lands in the first page just after
 760  * the Transport header, and the rest lands in the page list of
 761  * head->rc_arg.pages.
 762  *
 763  * Assumptions:
 764  *      - A PZRC has an XDR-aligned length (no implicit round-up).
 765  *      - There can be no trailing inline content (IOW, we assume
 766  *        a PZRC is never sent in an RDMA_MSG message, though it's
 767  *        allowed by spec).
 768  */
 769 static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
 770                                         struct svc_rdma_read_info *info,
 771                                         __be32 *p)
 772 {
 773         struct svc_rdma_recv_ctxt *head = info->ri_readctxt;
 774         int ret;
 775 
 776         ret = svc_rdma_build_read_chunk(rqstp, info, p);
 777         if (ret < 0)
 778                 goto out;
 779 
 780         trace_svcrdma_encode_pzr(info->ri_chunklen);
 781 
 782         head->rc_arg.len += info->ri_chunklen;
 783         head->rc_arg.buflen += info->ri_chunklen;
 784 
 785         head->rc_hdr_count = 1;
 786         head->rc_arg.head[0].iov_base = page_address(head->rc_pages[0]);
 787         head->rc_arg.head[0].iov_len = min_t(size_t, PAGE_SIZE,
 788                                              info->ri_chunklen);
 789 
 790         head->rc_arg.page_len = info->ri_chunklen -
 791                                 head->rc_arg.head[0].iov_len;
 792 
 793 out:
 794         return ret;
 795 }
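
For a Position Zero Read chunk the first page doubles as the RPC header buffer, so the split is by PAGE_SIZE rather than by Position. With an illustrative 10000-byte chunk on 4KB pages:

/* ri_chunklen = 10000:
 *
 *   head[0].iov_base = page_address(rc_pages[0])
 *   head[0].iov_len  = min(PAGE_SIZE, 10000) = 4096
 *   rc_arg.page_len  = 10000 - 4096 = 5904
 *   rc_hdr_count     = 1    the first page is accounted to the header
 */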
 796 
 797 /**
 798  * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
 799  * @rdma: controlling RDMA transport
 800  * @rqstp: set of pages to use as Read sink buffers
 801  * @head: pages under I/O collect here
 802  * @p: pointer to start of Read chunk
 803  *
 804  * Returns:
 805  *      %0 if all needed RDMA Reads were posted successfully,
 806  *      %-EINVAL if client provided too many segments,
 807  *      %-ENOMEM if rdma_rw context pool was exhausted,
 808  *      %-ENOTCONN if posting failed (connection is lost),
 809  *      %-EIO if rdma_rw initialization failed (DMA mapping, etc).
 810  *
 811  * Assumptions:
 812  * - All Read segments in @p have the same Position value.
 813  */
 814 int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
 815                              struct svc_rdma_recv_ctxt *head, __be32 *p)
 816 {
 817         struct svc_rdma_read_info *info;
 818         int ret;
 819 
 820         /* The request (with page list) is constructed in
 821          * head->rc_arg. Pages involved with RDMA Read I/O are
 822          * transferred there.
 823          */
 824         head->rc_arg.head[0] = rqstp->rq_arg.head[0];
 825         head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
 826         head->rc_arg.pages = head->rc_pages;
 827         head->rc_arg.page_base = 0;
 828         head->rc_arg.page_len = 0;
 829         head->rc_arg.len = rqstp->rq_arg.len;
 830         head->rc_arg.buflen = rqstp->rq_arg.buflen;
 831 
 832         info = svc_rdma_read_info_alloc(rdma);
 833         if (!info)
 834                 return -ENOMEM;
 835         info->ri_readctxt = head;
 836         info->ri_pageno = 0;
 837         info->ri_pageoff = 0;
 838 
 839         info->ri_position = be32_to_cpup(p + 1);
 840         if (info->ri_position)
 841                 ret = svc_rdma_build_normal_read_chunk(rqstp, info, p);
 842         else
 843                 ret = svc_rdma_build_pz_read_chunk(rqstp, info, p);
 844         if (ret < 0)
 845                 goto out_err;
 846 
 847         ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
 848         if (ret < 0)
 849                 goto out_err;
 850         return 0;
 851 
 852 out_err:
 853         svc_rdma_read_info_free(info);
 854         return ret;
 855 }
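
A sketch of how a receive path uses this entry point; the wrapper shown is illustrative (in-tree, the svcrdma receive path makes this call when it finds a non-empty Read list):

/* Illustrative only: post the Reads, then let the request wait. */
static int example_handle_read_list(struct svcxprt_rdma *rdma,
                                    struct svc_rqst *rqstp,
                                    struct svc_rdma_recv_ctxt *ctxt,
                                    __be32 *p)
{
        int ret;

        ret = svc_rdma_recv_read_chunk(rdma, rqstp, ctxt, p);
        if (ret < 0)
                return ret;

        /* Zero means the Reads were posted. svc_rdma_wc_read_done()
         * moves ctxt to sc_read_complete_q and re-enqueues the
         * transport once the chunk payload has arrived.
         */
        return 0;
}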
