lib/scatterlist.c

DEFINITIONS

This source file includes the following definitions:
  1. sg_next
  2. sg_nents
  3. sg_nents_for_len
  4. sg_last
  5. sg_init_table
  6. sg_init_one
  7. sg_kmalloc
  8. sg_kfree
  9. __sg_free_table
  10. sg_free_table
  11. __sg_alloc_table
  12. sg_alloc_table
  13. __sg_alloc_table_from_pages
  14. sg_alloc_table_from_pages
  15. sgl_alloc_order
  16. sgl_alloc
  17. sgl_free_n_order
  18. sgl_free_order
  19. sgl_free
  20. __sg_page_iter_start
  21. sg_page_count
  22. __sg_page_iter_next
  23. sg_dma_page_count
  24. __sg_page_iter_dma_next
  25. sg_miter_start
  26. sg_miter_get_next_page
  27. sg_miter_skip
  28. sg_miter_next
  29. sg_miter_stop
  30. sg_copy_buffer
  31. sg_copy_from_buffer
  32. sg_copy_to_buffer
  33. sg_pcopy_from_buffer
  34. sg_pcopy_to_buffer
  35. sg_zero_buffer

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:         The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);

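/*
 * Example (illustrative sketch, not part of this file): visiting every
 * entry of a possibly chained scatterlist.  sg_next() follows chain
 * entries transparently, so the walker never sees the chain elements
 * themselves.  'sgl' and 'consume()' are assumed names.
 *
 *        struct scatterlist *sg;
 *
 *        for (sg = sgl; sg; sg = sg_next(sg))
 *                consume(sg_page(sg), sg->offset, sg->length);
 */
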
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:         The scatterlist
 *
 * Description:
 * Returns the number of entries in @sg, taking chaining into account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
        int nents;
        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:         The scatterlist
 * @len:        The total required length
 *
 * Description:
 * Determines the number of entries in @sg that are required to meet
 * the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
        int nents;
        u64 total;

        if (!len)
                return 0;

        for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
                nents++;
                total += sg->length;
                if (total >= len)
                        return nents;
        }

        return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

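/*
 * Worked example (illustrative, not part of this file): for a list of
 * three 4096-byte entries, sg_nents_for_len(sg, 6000) returns 2, because
 * the first two entries already cover 8192 >= 6000 bytes, while any
 * length above 12288 yields -EINVAL.
 */
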
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:        First entry in the scatterlist
 * @nents:      Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

        BUG_ON(!sg_is_last(ret));
        return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:           The SG table
 * @nents:         Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
        sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:          SG entry
 * @buf:         Virtual address for IO
 * @buflen:      IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);

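/*
 * Example (hypothetical usage sketch): describing a single, physically
 * contiguous kmalloc'ed buffer to an interface that takes a scatterlist.
 * 'buf' and 'len' are assumed names; the resulting entry can then be
 * handed to e.g. dma_map_sg(dev, &sg, 1, dir).
 *
 *        struct scatterlist sg;
 *
 *        sg_init_one(&sg, buf, len);
 */
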
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw form) for kernel data structures.
                 * As we chain together a list of pages and then a normal
                 * kmalloc (tracked by kmemleak), in order for that last
                 * allocation not to become decoupled (and thus a
                 * false-positive) we need to inform kmemleak of all the
                 * intermediate allocations.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc_array(nents, sizeof(struct scatterlist),
                                     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:      The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *      scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:    Free function
 *
 *  Description:
 *    Free an sg table previously allocated and set up with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
        struct scatterlist *sgl, *next;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (table->orig_nents) {
                unsigned int alloc_size = table->orig_nents;
                unsigned int sg_size;

                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
                if (alloc_size > curr_max_ents) {
                        next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                table->orig_nents -= sg_size;
                if (nents_first_chunk)
                        nents_first_chunk = 0;
                else
                        free_fn(sgl, alloc_size);
                sgl = next;
                curr_max_ents = max_ents;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:      The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @first_chunk: Preallocated first scatterlist chunk, or NULL if none
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *      scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:   GFP allocation mask
 * @alloc_fn:   Allocator to use
 *
 * Description:
 *   This function sets up a @table @nents entries long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, struct scatterlist *first_chunk,
                     unsigned int nents_first_chunk, gfp_t gfp_mask,
                     sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;
        unsigned prv_max_ents;

        memset(table, 0, sizeof(*table));

        if (nents == 0)
                return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
        if (WARN_ON_ONCE(nents > max_ents))
                return -EINVAL;
#endif

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > curr_max_ents) {
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                if (first_chunk) {
                        sg = first_chunk;
                        first_chunk = NULL;
                } else {
                        sg = alloc_fn(alloc_size, gfp_mask);
                }
                if (unlikely(!sg)) {
                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage.  Without this, sg_kfree() may get
                         * confused.
                         */
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;

                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
                        sg_chain(prv, prv_max_ents, sg);
                else
                        table->sgl = sg;

                /*
                 * If no more entries after this one, mark the end
                 */
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
                prv_max_ents = curr_max_ents;
                curr_max_ents = max_ents;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               NULL, 0, gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

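/*
 * Example (hypothetical sketch): allocating a table, populating it from an
 * array of pages and releasing it again.  'pages' and 'npages' are assumed
 * names and error handling is abbreviated.
 *
 *        struct sg_table table;
 *        struct scatterlist *sg;
 *        unsigned int i;
 *
 *        if (sg_alloc_table(&table, npages, GFP_KERNEL))
 *                return -ENOMEM;
 *        for_each_sg(table.sgl, sg, table.orig_nents, i)
 *                sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *        ...
 *        sg_free_table(&table);
 */
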
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                               an array of pages
 * @sgt:         The sg table header to use
 * @pages:       Pointer to an array of page pointers
 * @n_pages:     Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:    GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. A user may provide an offset at
 *    the start and a size of valid data in a buffer specified by the page
 *    array. The returned sg table is released by sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                                unsigned int n_pages, unsigned int offset,
                                unsigned long size, unsigned int max_segment,
                                gfp_t gfp_mask)
{
        unsigned int chunks, cur_page, seg_len, i;
        int ret;
        struct scatterlist *s;

        if (WARN_ON(!max_segment || offset_in_page(max_segment)))
                return -EINVAL;

        /* compute number of contiguous chunks */
        chunks = 1;
        seg_len = 0;
        for (i = 1; i < n_pages; i++) {
                seg_len += PAGE_SIZE;
                if (seg_len >= max_segment ||
                    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
                        chunks++;
                        seg_len = 0;
                }
        }

        ret = sg_alloc_table(sgt, chunks, gfp_mask);
        if (unlikely(ret))
                return ret;

        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                unsigned int j, chunk_size;

                /* look for the end of the current chunk */
                seg_len = 0;
                for (j = cur_page + 1; j < n_pages; j++) {
                        seg_len += PAGE_SIZE;
                        if (seg_len >= max_segment ||
                            page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;
                }

                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page],
                            min_t(unsigned long, size, chunk_size), offset);
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                             an array of pages
 * @sgt:         The sg table header to use
 * @pages:       Pointer to an array of page pointers
 * @n_pages:     Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @gfp_mask:    GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at the start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                              unsigned int n_pages, unsigned int offset,
                              unsigned long size, gfp_t gfp_mask)
{
        return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
                                           SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

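/*
 * Example (hypothetical sketch): building a table straight from a pinned
 * page array, letting contiguous pages merge into single entries.  'pages',
 * 'n_pages' and 'nbytes' are assumed names.
 *
 *        struct sg_table sgt;
 *        int ret;
 *
 *        ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0, nbytes,
 *                                        GFP_KERNEL);
 *        if (ret)
 *                return ret;
 *        ...
 *        sg_free_table(&sgt);
 */
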
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *      for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
                                    unsigned int order, bool chainable,
                                    gfp_t gfp, unsigned int *nent_p)
{
        struct scatterlist *sgl, *sg;
        struct page *page;
        unsigned int nent, nalloc;
        u32 elem_len;

        nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
        /* Check for integer overflow */
        if (length > (nent << (PAGE_SHIFT + order)))
                return NULL;
        nalloc = nent;
        if (chainable) {
                /* Check for integer overflow */
                if (nalloc + 1 < nalloc)
                        return NULL;
                nalloc++;
        }
        sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
                            (gfp & ~GFP_DMA) | __GFP_ZERO);
        if (!sgl)
                return NULL;

        sg_init_table(sgl, nalloc);
        sg = sgl;
        while (length) {
                elem_len = min_t(u64, length, PAGE_SIZE << order);
                page = alloc_pages(gfp, order);
                if (!page) {
                        sgl_free(sgl);
                        return NULL;
                }

                sg_set_page(sg, page, elem_len, 0);
                length -= elem_len;
                sg = sg_next(sg);
        }
        WARN_ONCE(length, "length = %llu\n", length);
        if (nent_p)
                *nent_p = nent;
        return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
                              unsigned int *nent_p)
{
        return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);

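/*
 * Example (hypothetical sketch): allocating the backing pages together with
 * the scatterlist that describes them.  Such an allocation must be released
 * with sgl_free() (or sgl_free_order()), not with sg_free_table().
 * 'nbytes' is an assumed name.
 *
 *        unsigned int nents;
 *        struct scatterlist *sgl;
 *
 *        sgl = sgl_alloc(nbytes, GFP_KERNEL, &nents);
 *        if (!sgl)
 *                return -ENOMEM;
 *        ...
 *        sgl_free(sgl);
 */
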
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
        struct scatterlist *sg;
        struct page *page;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (!sg)
                        break;
                page = sg_page(sg);
                if (page)
                        __free_pages(page, order);
        }
        kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
        sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
        sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

/*
 * Initialize a page iterator over @nents entries of @sglist, starting
 * @pgoffset pages into the list.
 */
void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset)
{
        piter->__pg_advance = 0;
        piter->__nents = nents;

        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

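/*
 * Example (illustrative sketch): the page iterator is normally driven via
 * the for_each_sg_page() helper from <linux/scatterlist.h> rather than by
 * calling __sg_page_iter_next() directly.  'sgl', 'nents' and
 * 'do_something()' are assumed names.
 *
 *        struct sg_page_iter piter;
 *
 *        for_each_sg_page(sgl, &piter, nents, 0)
 *                do_something(sg_page_iter_page(&piter));
 */
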
static int sg_dma_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
        struct sg_page_iter *piter = &dma_iter->base;

        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

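/*
 * Example (illustrative sketch): after dma_map_sg(), the mapped region can
 * be walked one DMA page at a time with for_each_sg_dma_page(), using
 * sg_page_iter_dma_address() for the bus address of each page.  'sgl',
 * 'count' (the dma_map_sg() return value) and 'program_hw_page()' are
 * assumed names.
 *
 *        struct sg_dma_page_iter dma_iter;
 *
 *        for_each_sg_dma_page(sgl, &dma_iter, count, 0)
 *                program_hw_page(sg_page_iter_dma_address(&dma_iter));
 */
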
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_ATOMIC, SG_MITER_TO_SG,
 *      SG_MITER_FROM_SG)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        __sg_page_iter_start(&miter->piter, sgl, nents, 0);
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
        if (!miter->__remaining) {
                struct scatterlist *sg;

                if (!__sg_page_iter_next(&miter->piter))
                        return false;

                sg = miter->piter.sg;

                miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
                miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
                miter->__offset &= PAGE_SIZE - 1;
                miter->__remaining = sg->offset + sg->length -
                                     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
                                     miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }

        return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been advanced by sg_miter_next(), this
 *   stops @miter first.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
        sg_miter_stop(miter);

        while (offset) {
                off_t consumed;

                if (!sg_miter_get_next_page(miter))
                        return false;

                consumed = min_t(off_t, offset, miter->__remaining);
                miter->__offset += consumed;
                miter->__remaining -= consumed;
                offset -= consumed;
        }

        return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        sg_miter_stop(miter);

        /*
         * Get to the next page if necessary.
         * __remaining, __offset is adjusted by sg_miter_stop
         */
        if (!sg_miter_get_next_page(miter))
                return false;

        miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + miter->__offset;
        else
                miter->addr = kmap(miter->page) + miter->__offset;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);

        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;
                miter->__remaining -= miter->consumed;

                if ((miter->__flags & SG_MITER_TO_SG) &&
                    !PageSlab(miter->page))
                        flush_kernel_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON_ONCE(preemptible());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);

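/*
 * Example (hypothetical sketch): checksumming an sg list through the
 * mapping iterator.  SG_MITER_ATOMIC keeps the mappings usable from atomic
 * context, so no sleeping is allowed between sg_miter_next() and
 * sg_miter_stop().  'sgl', 'nents', 'csum' and 'update_csum()' are assumed
 * names.
 *
 *        struct sg_mapping_iter miter;
 *        u32 csum = 0;
 *
 *        sg_miter_start(&miter, sgl, nents,
 *                       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *        while (sg_miter_next(&miter))
 *                csum = update_csum(csum, miter.addr, miter.length);
 *        sg_miter_stop(&miter);
 */
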
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy from
 * @buflen:              The number of bytes to copy
 * @skip:                Number of bytes to skip before copying
 * @to_buffer:           transfer direction (true == from an sg list to a
 *                       buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
                      size_t buflen, off_t skip, bool to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while ((offset < buflen) && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy from
 * @buflen:              The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           const void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy to
 * @buflen:              The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

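/*
 * Example (hypothetical sketch): gathering the first 'len' bytes of an sg
 * list into a linear buffer, e.g. to inspect a protocol header.  'hdr' is
 * an assumed buffer of at least 'len' bytes.
 *
 *        size_t copied;
 *
 *        copied = sg_copy_to_buffer(sgl, nents, hdr, len);
 *        if (copied < len)
 *                return -EIO;
 */
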
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy from
 * @buflen:              The number of bytes to copy
 * @skip:                Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            const void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy to
 * @buflen:              The number of bytes to copy
 * @skip:                Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero out part of an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buflen:              The number of bytes to zero out
 * @skip:                Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
                       size_t buflen, off_t skip)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while (offset < buflen && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);
                memset(miter.addr, 0, len);

                offset += len;
        }

        sg_miter_stop(&miter);
        return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
