This source file includes the following definitions:
- bio_find_or_create_slab
- bio_put_slab
- bvec_nr_vecs
- bvec_free
- bvec_alloc
- bio_uninit
- bio_free
- bio_init
- bio_reset
- __bio_chain_endio
- bio_chain_endio
- bio_chain
- bio_alloc_rescue
- punt_bios_to_rescuer
- bio_alloc_bioset
- zero_fill_bio_iter
- bio_truncate
- bio_put
- __bio_clone_fast
- bio_clone_fast
- page_is_mergeable
- bio_try_merge_pc_page
- __bio_add_pc_page
- bio_add_pc_page
- __bio_try_merge_page
- __bio_add_page
- bio_add_page
- bio_release_pages
- __bio_iov_bvec_add_pages
- bio_iov_iter_get_pages
- submit_bio_wait_endio
- submit_bio_wait
- bio_advance
- bio_copy_data_iter
- bio_copy_data
- bio_list_copy_data
- bio_alloc_map_data
- bio_copy_from_iter
- bio_copy_to_iter
- bio_free_pages
- bio_uncopy_user
- bio_copy_user_iov
- bio_map_user_iov
- bio_unmap_user
- bio_invalidate_vmalloc_pages
- bio_map_kern_endio
- bio_map_kern
- bio_copy_kern_endio
- bio_copy_kern_endio_read
- bio_copy_kern
- bio_set_pages_dirty
- bio_dirty_fn
- bio_check_pages_dirty
- update_io_ticks
- generic_start_io_acct
- generic_end_io_acct
- bio_remaining_done
- bio_endio
- bio_split
- bio_trim
- biovec_init_pool
- bioset_exit
- bioset_init
- bioset_init_from_src
- bio_disassociate_blkg
- __bio_associate_blkg
- bio_associate_blkg_from_css
- bio_associate_blkg_from_page
- bio_associate_blkg
- bio_clone_blkg_association
- biovec_init_slabs
- init_bio
   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
   4  */
   5 #include <linux/mm.h>
   6 #include <linux/swap.h>
   7 #include <linux/bio.h>
   8 #include <linux/blkdev.h>
   9 #include <linux/uio.h>
  10 #include <linux/iocontext.h>
  11 #include <linux/slab.h>
  12 #include <linux/init.h>
  13 #include <linux/kernel.h>
  14 #include <linux/export.h>
  15 #include <linux/mempool.h>
  16 #include <linux/workqueue.h>
  17 #include <linux/cgroup.h>
  18 #include <linux/blk-cgroup.h>
  19 #include <linux/highmem.h>
  20 
  21 #include <trace/events/block.h>
  22 #include "blk.h"
  23 #include "blk-rq-qos.h"
  24 
  25 /*
  26  * Test patch to inline a certain number of bi_io_vec's inside the bio
  27  * itself, to shrink a bio data allocation from two mempool calls to one.
  28  */
  29 #define BIO_INLINE_VECS         4
  30 
  31 /*
  32  * if you change this list, also change bvec_alloc or things will
  33  * break badly! cannot be bigger than what you can fit into an
  34  * unsigned short
  35  */
  36 #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
  37 static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
  38         BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
  39 };
  40 #undef BV
  41 
  42 /*
  43  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  44  * IO code that does not need private memory pools.
  45  */
  46 struct bio_set fs_bio_set;
  47 EXPORT_SYMBOL(fs_bio_set);
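
/*
 * Example (an illustrative sketch, not part of this file): most filesystem
 * callers reach fs_bio_set through the bio_alloc() wrapper in <linux/bio.h>,
 * which is backed by this set.  'bdev', 'sector', 'page' and 'my_end_io'
 * below are hypothetical names.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */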
  48 
  49 /*
  50  * Our slab pool management
  51  */
  52 struct bio_slab {
  53         struct kmem_cache *slab;
  54         unsigned int slab_ref;
  55         unsigned int slab_size;
  56         char name[8];
  57 };
  58 static DEFINE_MUTEX(bio_slab_lock);
  59 static struct bio_slab *bio_slabs;
  60 static unsigned int bio_slab_nr, bio_slab_max;
  61 
  62 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
  63 {
  64         unsigned int sz = sizeof(struct bio) + extra_size;
  65         struct kmem_cache *slab = NULL;
  66         struct bio_slab *bslab, *new_bio_slabs;
  67         unsigned int new_bio_slab_max;
  68         unsigned int i, entry = -1;
  69 
  70         mutex_lock(&bio_slab_lock);
  71 
  72         i = 0;
  73         while (i < bio_slab_nr) {
  74                 bslab = &bio_slabs[i];
  75 
  76                 if (!bslab->slab && entry == -1)
  77                         entry = i;
  78                 else if (bslab->slab_size == sz) {
  79                         slab = bslab->slab;
  80                         bslab->slab_ref++;
  81                         break;
  82                 }
  83                 i++;
  84         }
  85 
  86         if (slab)
  87                 goto out_unlock;
  88 
  89         if (bio_slab_nr == bio_slab_max && entry == -1) {
  90                 new_bio_slab_max = bio_slab_max << 1;
  91                 new_bio_slabs = krealloc(bio_slabs,
  92                                          new_bio_slab_max * sizeof(struct bio_slab),
  93                                          GFP_KERNEL);
  94                 if (!new_bio_slabs)
  95                         goto out_unlock;
  96                 bio_slab_max = new_bio_slab_max;
  97                 bio_slabs = new_bio_slabs;
  98         }
  99         if (entry == -1)
 100                 entry = bio_slab_nr++;
 101 
 102         bslab = &bio_slabs[entry];
 103 
 104         snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
 105         slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
 106                                  SLAB_HWCACHE_ALIGN, NULL);
 107         if (!slab)
 108                 goto out_unlock;
 109 
 110         bslab->slab = slab;
 111         bslab->slab_ref = 1;
 112         bslab->slab_size = sz;
 113 out_unlock:
 114         mutex_unlock(&bio_slab_lock);
 115         return slab;
 116 }
 117 
 118 static void bio_put_slab(struct bio_set *bs)
 119 {
 120         struct bio_slab *bslab = NULL;
 121         unsigned int i;
 122 
 123         mutex_lock(&bio_slab_lock);
 124 
 125         for (i = 0; i < bio_slab_nr; i++) {
 126                 if (bs->bio_slab == bio_slabs[i].slab) {
 127                         bslab = &bio_slabs[i];
 128                         break;
 129                 }
 130         }
 131 
 132         if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 133                 goto out;
 134 
 135         WARN_ON(!bslab->slab_ref);
 136 
 137         if (--bslab->slab_ref)
 138                 goto out;
 139 
 140         kmem_cache_destroy(bslab->slab);
 141         bslab->slab = NULL;
 142 
 143 out:
 144         mutex_unlock(&bio_slab_lock);
 145 }
 146 
 147 unsigned int bvec_nr_vecs(unsigned short idx)
 148 {
 149         return bvec_slabs[--idx].nr_vecs;
 150 }
 151 
 152 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
 153 {
 154         if (!idx)
 155                 return;
 156         idx--;
 157 
 158         BIO_BUG_ON(idx >= BVEC_POOL_NR);
 159 
 160         if (idx == BVEC_POOL_MAX) {
 161                 mempool_free(bv, pool);
 162         } else {
 163                 struct biovec_slab *bvs = bvec_slabs + idx;
 164 
 165                 kmem_cache_free(bvs->slab, bv);
 166         }
 167 }
 168 
 169 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
 170                            mempool_t *pool)
 171 {
 172         struct bio_vec *bvl;
 173 
 174         /*
 175          * see comment near bvec_slabs define!
 176          */
 177         switch (nr) {
 178         case 1:
 179                 *idx = 0;
 180                 break;
 181         case 2 ... 4:
 182                 *idx = 1;
 183                 break;
 184         case 5 ... 16:
 185                 *idx = 2;
 186                 break;
 187         case 17 ... 64:
 188                 *idx = 3;
 189                 break;
 190         case 65 ... 128:
 191                 *idx = 4;
 192                 break;
 193         case 129 ... BIO_MAX_PAGES:
 194                 *idx = 5;
 195                 break;
 196         default:
 197                 return NULL;
 198         }
 199 
 200         /*
 201          * idx now points to the pool we want to allocate from. only the
 202          * 1-vec entry pool is mempool backed.
 203          */
 204         if (*idx == BVEC_POOL_MAX) {
 205 fallback:
 206                 bvl = mempool_alloc(pool, gfp_mask);
 207         } else {
 208                 struct biovec_slab *bvs = bvec_slabs + *idx;
 209                 gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
 210 
 211                 /*
 212                  * Make this allocation restricted and don't dump info on
 213                  * allocation failures, since we'll fall back to the mempool
 214                  * in case of failure.
 215                  */
 216                 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 217 
 218                 /*
 219                  * Try a slab allocation. If this fails and
 220                  * __GFP_DIRECT_RECLAIM is set, retry with the 1-entry mempool
 221                  */
 222                 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
 223                 if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
 224                         *idx = BVEC_POOL_MAX;
 225                         goto fallback;
 226                 }
 227         }
 228 
 229         (*idx)++;
 230         return bvl;
 231 }
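
/*
 * Note the index encoding shared by bvec_alloc() and bvec_free(): the
 * returned *idx is the slab index plus one, so that zero can mean "no
 * external bvec table".  A sketch of the pairing (assuming a caller with
 * its own mempool 'pool'; not actual kernel code):
 *
 *	unsigned long idx;
 *	struct bio_vec *bvl = bvec_alloc(GFP_NOIO, 32, &idx, pool);
 *		(32 entries selects the "biovec-64" slab; idx becomes 4)
 *	...
 *	bvec_free(pool, bvl, idx);	(expects the same +1 encoding)
 */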
 232 
 233 void bio_uninit(struct bio *bio)
 234 {
 235         bio_disassociate_blkg(bio);
 236 
 237         if (bio_integrity(bio))
 238                 bio_integrity_free(bio);
 239 }
 240 EXPORT_SYMBOL(bio_uninit);
 241 
 242 static void bio_free(struct bio *bio)
 243 {
 244         struct bio_set *bs = bio->bi_pool;
 245         void *p;
 246 
 247         bio_uninit(bio);
 248 
 249         if (bs) {
 250                 bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
 251 
 252                 /*
 253                  * If we have front padding, adjust the bio pointer before
 254                  * freeing.
 255                  */
 255                 p = bio;
 256                 p -= bs->front_pad;
 257 
 258                 mempool_free(p, &bs->bio_pool);
 259         } else {
 260                 /* Bio was allocated by bio_kmalloc() */
 261                 kfree(bio);
 262         }
 263 }
 264 
 265 /*
 266  * Users of this function have their own bio allocation. Subsequently,
 267  * they must remember to pair any call to bio_init() with bio_uninit()
 268  * when IO has completed, or when the bio is released.
 269  */
 270 void bio_init(struct bio *bio, struct bio_vec *table,
 271               unsigned short max_vecs)
 272 {
 273         memset(bio, 0, sizeof(*bio));
 274         atomic_set(&bio->__bi_remaining, 1);
 275         atomic_set(&bio->__bi_cnt, 1);
 276 
 277         bio->bi_io_vec = table;
 278         bio->bi_max_vecs = max_vecs;
 279 }
 280 EXPORT_SYMBOL(bio_init);
 281 
 282 /**
 283  * bio_reset - reinitialize a bio
 284  * @bio:	bio to reset
 285  *
 286  * Description:
 287  *   After calling bio_reset(), @bio will be in the same state as a freshly
 288  *   allocated bio returned by bio_alloc_bioset() - the only fields that
 289  *   are preserved are the ones that are initialized by bio_alloc_bioset().
 290  *   See the comment in struct bio.
 291  */
 292 void bio_reset(struct bio *bio)
 293 {
 294         unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 295 
 296         bio_uninit(bio);
 297 
 298         memset(bio, 0, BIO_RESET_BYTES);
 299         bio->bi_flags = flags;
 300         atomic_set(&bio->__bi_remaining, 1);
 301 }
 302 EXPORT_SYMBOL(bio_reset);
 303 
 304 static struct bio *__bio_chain_endio(struct bio *bio)
 305 {
 306         struct bio *parent = bio->bi_private;
 307 
 308         if (!parent->bi_status)
 309                 parent->bi_status = bio->bi_status;
 310         bio_put(bio);
 311         return parent;
 312 }
 313 
 314 static void bio_chain_endio(struct bio *bio)
 315 {
 316         bio_endio(__bio_chain_endio(bio));
 317 }
 318 
 319 /**
 320  * bio_chain - chain bio completions
 321  * @bio: the target bio
 322  * @parent: the parent bio of @bio
 323  *
 324  * The caller won't have a bi_end_io called when @bio completes - instead,
 325  * @parent's bi_end_io will be called when @bio and all of its children
 326  * have completed.  @bio must not have an existing bi_end_io or bi_private;
 327  * bio_chain() increments @parent's remaining count, which bio_endio()
 328  * decrements again once @bio completes.
 329  */
 330 void bio_chain(struct bio *bio, struct bio *parent)
 331 {
 332         BUG_ON(bio->bi_private || bio->bi_end_io);
 333 
 334         bio->bi_private = parent;
 335         bio->bi_end_io  = bio_chain_endio;
 336         bio_inc_remaining(parent);
 337 }
 338 EXPORT_SYMBOL(bio_chain);
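
/*
 * Example (a hedged sketch of the pattern used by blkdev_issue_discard()
 * and friends, not verbatim kernel code): issue a series of bios and wait
 * only once, by chaining each submitted bio to its successor so the final
 * bio completes last.  'more_work' is a hypothetical condition.
 *
 *	struct bio *bio = NULL, *new;
 *
 *	while (more_work) {
 *		new = bio_alloc(GFP_KERNEL, 0);
 *		... set up 'new' ...
 *		if (bio) {
 *			bio_chain(bio, new);	('new' now waits for 'bio')
 *			submit_bio(bio);
 *		}
 *		bio = new;
 *	}
 *	ret = submit_bio_wait(bio);		(waits for the whole chain)
 */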
 339 
 340 static void bio_alloc_rescue(struct work_struct *work)
 341 {
 342         struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
 343         struct bio *bio;
 344 
 345         while (1) {
 346                 spin_lock(&bs->rescue_lock);
 347                 bio = bio_list_pop(&bs->rescue_list);
 348                 spin_unlock(&bs->rescue_lock);
 349 
 350                 if (!bio)
 351                         break;
 352 
 353                 generic_make_request(bio);
 354         }
 355 }
 356 
 357 static void punt_bios_to_rescuer(struct bio_set *bs)
 358 {
 359         struct bio_list punt, nopunt;
 360         struct bio *bio;
 361 
 362         if (WARN_ON_ONCE(!bs->rescue_workqueue))
 363                 return;
 364         /*
 365          * In order to guarantee forward progress we must punt only bios
 366          * that were allocated from this bio_set; otherwise, if there was
 367          * a bio on a lower device we could deadlock if the rescue thread
 368          * blocked on it and the driver in turn blocked waiting for this
 369          * bio_set's mempool to be replenished.
 370          *
 371          * Since bio lists are singly linked, pop them all instead of
 372          * trying to remove from the middle of the list:
 373          */
 374 
 375         bio_list_init(&punt);
 376         bio_list_init(&nopunt);
 377 
 378         while ((bio = bio_list_pop(&current->bio_list[0])))
 379                 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 380         current->bio_list[0] = nopunt;
 381 
 382         bio_list_init(&nopunt);
 383         while ((bio = bio_list_pop(&current->bio_list[1])))
 384                 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
 385         current->bio_list[1] = nopunt;
 386 
 387         spin_lock(&bs->rescue_lock);
 388         bio_list_merge(&bs->rescue_list, &punt);
 389         spin_unlock(&bs->rescue_lock);
 390 
 391         queue_work(bs->rescue_workqueue, &bs->rescue_work);
 392 }
 393 
 394 /**
 395  * bio_alloc_bioset - allocate a bio for I/O
 396  * @gfp_mask:   the GFP_* mask given to the slab allocator
 397  * @nr_iovecs:	number of iovecs to pre-allocate
 398  * @bs:		the bio_set to allocate from.
 399  *
 400  * Description:
 401  *    If @bs is NULL, uses kmalloc() to allocate the bio; else the
 402  *    allocation is backed by the @bs's mempool.
 403  *
 404  *    When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc
 405  *    will always be able to allocate a bio. This is due to the mempool
 406  *    guarantees. To make this work, callers must never allocate more than
 407  *    1 bio at a time from this pool. Callers that need to allocate more
 408  *    than 1 bio must always submit the previously allocated bio for IO
 409  *    before attempting to allocate a new one.
 410  *
 411  *    Note that when running under generic_make_request() (i.e. any block
 412  *    driver), bios are not submitted until after you return - see the code
 413  *    in generic_make_request() that converts recursion into iteration, to
 414  *    prevent stack overflows.
 415  *
 416  *    This would normally mean allocating multiple bios under
 417  *    generic_make_request() would be susceptible to deadlocks, but we have
 418  *    deadlock avoidance code that resubmits any blocked bios from a rescuer
 419  *    thread.
 420  *
 421  *    However, we do not guarantee forward progress for allocations from
 422  *    other mempools. Doing multiple allocations from the same mempool under
 423  *    generic_make_request() should be avoided - instead, use bio_set's
 424  *    front_pad for per bio allocations.
 425  *
 426  *    RETURNS:
 427  *    Pointer to new bio on success, NULL on failure.
 428  */
 429 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
 430                              struct bio_set *bs)
 431 {
 432         gfp_t saved_gfp = gfp_mask;
 433         unsigned front_pad;
 434         unsigned inline_vecs;
 435         struct bio_vec *bvl = NULL;
 436         struct bio *bio;
 437         void *p;
 438 
 439         if (!bs) {
 440                 if (nr_iovecs > UIO_MAXIOV)
 441                         return NULL;
 442 
 443                 p = kmalloc(sizeof(struct bio) +
 444                             nr_iovecs * sizeof(struct bio_vec),
 445                             gfp_mask);
 446                 front_pad = 0;
 447                 inline_vecs = nr_iovecs;
 448         } else {
 449                 /* should not use nobvec bioset for nr_iovecs > 0 */
 450                 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
 451                                  nr_iovecs > 0))
 452                         return NULL;
 453                 /*
 454                  * generic_make_request() converts recursion to iteration;
 455                  * this means if we're running beneath it, any bios we
 456                  * allocate and submit will not be submitted (and thus
 457                  * freed) until after we return.
 458                  *
 459                  * This exposes us to a potential deadlock if we allocate
 460                  * multiple bios from the same bio_set while running
 461                  * underneath generic_make_request(). If we were to allocate
 462                  * multiple bios (say a stacking block driver that was
 463                  * splitting bios), we would deadlock if we exhausted the
 464                  * mempool's reserve.
 465                  *
 466                  * We solve this, and guarantee forward progress, with a
 467                  * rescuer workqueue per bio_set. If we go to allocate and
 468                  * there are bios on current->bio_list, we first try the
 469                  * allocation without __GFP_DIRECT_RECLAIM; if that fails,
 470                  * we punt those bios we would be blocking to the rescuer
 471                  * workqueue before we retry with the original gfp_flags.
 472                  */
 473 
 474                 if (current->bio_list &&
 475                     (!bio_list_empty(&current->bio_list[0]) ||
 476                      !bio_list_empty(&current->bio_list[1])) &&
 477                     bs->rescue_workqueue)
 478                         gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 479 
 480                 p = mempool_alloc(&bs->bio_pool, gfp_mask);
 481                 if (!p && gfp_mask != saved_gfp) {
 482                         punt_bios_to_rescuer(bs);
 483                         gfp_mask = saved_gfp;
 484                         p = mempool_alloc(&bs->bio_pool, gfp_mask);
 485                 }
 486 
 487                 front_pad = bs->front_pad;
 488                 inline_vecs = BIO_INLINE_VECS;
 489         }
 490 
 491         if (unlikely(!p))
 492                 return NULL;
 493 
 494         bio = p + front_pad;
 495         bio_init(bio, NULL, 0);
 496 
 497         if (nr_iovecs > inline_vecs) {
 498                 unsigned long idx = 0;
 499 
 500                 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
 501                 if (!bvl && gfp_mask != saved_gfp) {
 502                         punt_bios_to_rescuer(bs);
 503                         gfp_mask = saved_gfp;
 504                         bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
 505                 }
 506 
 507                 if (unlikely(!bvl))
 508                         goto err_free;
 509 
 510                 bio->bi_flags |= idx << BVEC_POOL_OFFSET;
 511         } else if (nr_iovecs) {
 512                 bvl = bio->bi_inline_vecs;
 513         }
 514 
 515         bio->bi_pool = bs;
 516         bio->bi_max_vecs = nr_iovecs;
 517         bio->bi_io_vec = bvl;
 518         return bio;
 519 
 520 err_free:
 521         mempool_free(p, &bs->bio_pool);
 522         return NULL;
 523 }
 524 EXPORT_SYMBOL(bio_alloc_bioset);
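
/*
 * Example (a sketch under the rules documented above, not verbatim kernel
 * code): allocating from a private bio_set.  With a mempool-backed set,
 * never hold more than one unsubmitted bio allocated from the same set.
 * 'my_bs' is a hypothetical bio_set initialized with bioset_init().
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 4, &my_bs);
 *
 *	if (!bio)
 *		return -ENOMEM;
 *	... fill and submit 'bio' before allocating the next one ...
 */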
 525 
 526 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 527 {
 528         unsigned long flags;
 529         struct bio_vec bv;
 530         struct bvec_iter iter;
 531 
 532         __bio_for_each_segment(bv, bio, iter, start) {
 533                 char *data = bvec_kmap_irq(&bv, &flags);
 534                 memset(data, 0, bv.bv_len);
 535                 flush_dcache_page(bv.bv_page);
 536                 bvec_kunmap_irq(data, &flags);
 537         }
 538 }
 539 EXPORT_SYMBOL(zero_fill_bio_iter);
 540 
 541 /**
 542  * bio_truncate - truncate the bio to the new size @new_size
 543  * @bio:	the bio to be truncated
 544  * @new_size:	new size for truncating the bio
 545  *
 546  * Description:
 547  *   Truncate the bio to the new size of @new_size. If bio_op(bio) is
 548  *   REQ_OP_READ, zero the truncated part. This function should only
 549  *   be used for handling corner cases, such as bio eod.
 550  */
 551 void bio_truncate(struct bio *bio, unsigned new_size)
 552 {
 553         struct bio_vec bv;
 554         struct bvec_iter iter;
 555         unsigned int done = 0;
 556         bool truncated = false;
 557 
 558         if (new_size >= bio->bi_iter.bi_size)
 559                 return;
 560 
 561         if (bio_op(bio) != REQ_OP_READ)
 562                 goto exit;
 563 
 564         bio_for_each_segment(bv, bio, iter) {
 565                 if (done + bv.bv_len > new_size) {
 566                         unsigned offset;
 567 
 568                         if (!truncated)
 569                                 offset = new_size - done;
 570                         else
 571                                 offset = 0;
 572                         zero_user(bv.bv_page, offset, bv.bv_len - offset);
 573                         truncated = true;
 574                 }
 575                 done += bv.bv_len;
 576         }
 577 
 578  exit:
 579         /*
 580          * Don't touch the bvec table here and make it really immutable,
 581          * since the fs bio user has to retrieve all pages via
 582          * bio_for_each_segment_all in its .end_io() callback.
 583          *
 584          * It is enough to truncate the bio by updating .bi_size, since we
 585          * can make a correct bvec with the truncated .bi_size for drain IO.
 586          */
 587         bio->bi_iter.bi_size = new_size;
 588 }
 589 
 590 /**
 591  * bio_put - release a reference to a bio
 592  * @bio:   bio to release reference to
 593  *
 594  * Description:
 595  *   Put a reference to a &struct bio, either one you have gotten with
 596  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 597  **/
 598 void bio_put(struct bio *bio)
 599 {
 600         if (!bio_flagged(bio, BIO_REFFED))
 601                 bio_free(bio);
 602         else {
 603                 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
 604 
 605                 /*
 606                  * last put frees it
 607                  */
 608                 if (atomic_dec_and_test(&bio->__bi_cnt))
 609                         bio_free(bio);
 610         }
 611 }
 612 EXPORT_SYMBOL(bio_put);
 613 
 614 /**
 615  * __bio_clone_fast - clone a bio that shares the original bio's biovec
 616  * @bio: destination bio
 617  * @bio_src: bio to clone
 618  *
 619  * Clone a &bio. Caller will own the returned bio, but not
 620  * the actual data it points to. Reference count of returned
 621  * bio will be one.
 622  *
 623  * Caller must ensure that @bio_src is not freed before @bio.
 624  */
 625 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 626 {
 627         BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
 628 
 629         /*
 630          * most users will be overriding ->bi_disk with a new target,
 631          * so we don't set nor calculate new physical/hw segment counts here
 632          */
 633         bio->bi_disk = bio_src->bi_disk;
 634         bio->bi_partno = bio_src->bi_partno;
 635         bio_set_flag(bio, BIO_CLONED);
 636         if (bio_flagged(bio_src, BIO_THROTTLED))
 637                 bio_set_flag(bio, BIO_THROTTLED);
 638         bio->bi_opf = bio_src->bi_opf;
 639         bio->bi_ioprio = bio_src->bi_ioprio;
 640         bio->bi_write_hint = bio_src->bi_write_hint;
 641         bio->bi_iter = bio_src->bi_iter;
 642         bio->bi_io_vec = bio_src->bi_io_vec;
 643 
 644         bio_clone_blkg_association(bio, bio_src);
 645         blkcg_bio_issue_init(bio);
 646 }
 647 EXPORT_SYMBOL(__bio_clone_fast);
 648 
 649 /**
 650  * bio_clone_fast - clone a bio that shares the original bio's biovec
 651  * @bio: bio to clone
 652  * @gfp_mask: allocation priority
 653  * @bs: bio_set to allocate from
 654  *
 655  * Like __bio_clone_fast, only also allocates the returned bio
 656  */
 657 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
 658 {
 659         struct bio *b;
 660 
 661         b = bio_alloc_bioset(gfp_mask, 0, bs);
 662         if (!b)
 663                 return NULL;
 664 
 665         __bio_clone_fast(b, bio);
 666 
 667         if (bio_integrity(bio)) {
 668                 int ret;
 669 
 670                 ret = bio_integrity_clone(b, bio, gfp_mask);
 671 
 672                 if (ret < 0) {
 673                         bio_put(b);
 674                         return NULL;
 675                 }
 676         }
 677 
 678         return b;
 679 }
 680 EXPORT_SYMBOL(bio_clone_fast);
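
/*
 * Example (a hedged sketch of the stacking-driver pattern, e.g. as used by
 * device mapper; not verbatim kernel code): clone the bio, redirect the
 * clone, and complete the original from the clone's endio.  'clone_bs',
 * 'lower_bdev' and 'my_clone_end_io' are hypothetical names.
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, &clone_bs);
 *
 *	bio_set_dev(clone, lower_bdev);
 *	clone->bi_private = bio;
 *	clone->bi_end_io = my_clone_end_io;
 *	submit_bio(clone);
 *
 * The clone shares bio's biovec, so 'bio' must not be freed before 'clone'.
 */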
 681 
 682 static inline bool page_is_mergeable(const struct bio_vec *bv,
 683                 struct page *page, unsigned int len, unsigned int off,
 684                 bool *same_page)
 685 {
 686         phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
 687                 bv->bv_offset + bv->bv_len - 1;
 688         phys_addr_t page_addr = page_to_phys(page);
 689 
 690         if (vec_end_addr + 1 != page_addr + off)
 691                 return false;
 692         if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
 693                 return false;
 694 
 695         *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
 696         if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
 697                 return false;
 698         return true;
 699 }
 700 
 701 static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
 702                 struct page *page, unsigned len, unsigned offset,
 703                 bool *same_page)
 704 {
 705         struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 706         unsigned long mask = queue_segment_boundary(q);
 707         phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
 708         phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
 709 
 710         if ((addr1 | mask) != (addr2 | mask))
 711                 return false;
 712         if (bv->bv_len + len > queue_max_segment_size(q))
 713                 return false;
 714         return __bio_try_merge_page(bio, page, len, offset, same_page);
 715 }
 716 
 717 /**
 718  * __bio_add_pc_page - attempt to add page to passthrough bio
 719  * @q: the target queue
 720  * @bio: destination bio
 721  * @page: page to add
 722  * @len: vec entry length
 723  * @offset: vec entry offset
 724  * @same_page: return if the merge happened inside the same page
 725  *
 726  * Attempt to add a page to the bio_vec maplist. This can fail for a
 727  * number of reasons, such as the bio being full or target block device
 728  * limitations. The target block device must allow bio's up to PAGE_SIZE,
 729  * so it is always possible to add a single page to an empty bio.
 730  *
 731  * This should only be used by passthrough bios.
 732  */
 733 static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
 734                 struct page *page, unsigned int len, unsigned int offset,
 735                 bool *same_page)
 736 {
 737         struct bio_vec *bvec;
 738 
 739         /*
 740          * cloned bio must not modify vec list
 741          */
 742         if (unlikely(bio_flagged(bio, BIO_CLONED)))
 743                 return 0;
 744 
 745         if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
 746                 return 0;
 747 
 748         if (bio->bi_vcnt > 0) {
 749                 if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
 750                         return len;
 751 
 752                 /*
 753                  * If the queue doesn't support SG gaps and adding this
 754                  * segment would create a gap, disallow it.
 755                  */
 756                 bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
 757                 if (bvec_gap_to_prev(q, bvec, offset))
 758                         return 0;
 759         }
 760 
 761         if (bio_full(bio, len))
 762                 return 0;
 763 
 764         if (bio->bi_vcnt >= queue_max_segments(q))
 765                 return 0;
 766 
 767         bvec = &bio->bi_io_vec[bio->bi_vcnt];
 768         bvec->bv_page = page;
 769         bvec->bv_len = len;
 770         bvec->bv_offset = offset;
 771         bio->bi_vcnt++;
 772         bio->bi_iter.bi_size += len;
 773         return len;
 774 }
 775 
 776 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
 777                 struct page *page, unsigned int len, unsigned int offset)
 778 {
 779         bool same_page = false;
 780         return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
 781 }
 782 EXPORT_SYMBOL(bio_add_pc_page);
 783 
 784 /**
 785  * __bio_try_merge_page - try appending data to an existing bvec.
 786  * @bio: destination bio
 787  * @page: start page to add
 788  * @len: length of the data to add
 789  * @off: offset of the data relative to @page
 790  * @same_page: return if the segment has been merged inside the same page
 791  *
 792  * Try to add the data at @page + @off to the last bvec of @bio.  This is
 793  * a useful optimisation for file systems with a block size smaller than
 794  * the page size.
 795  *
 796  * Warn if (@len, @off) crosses pages in case that @same_page is true.
 797  *
 798  * Return %true on success or %false on failure.
 799  */
 800 bool __bio_try_merge_page(struct bio *bio, struct page *page,
 801                 unsigned int len, unsigned int off, bool *same_page)
 802 {
 803         if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 804                 return false;
 805 
 806         if (bio->bi_vcnt > 0) {
 807                 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 808 
 809                 if (page_is_mergeable(bv, page, len, off, same_page)) {
 810                         if (bio->bi_iter.bi_size > UINT_MAX - len)
 811                                 return false;
 812                         bv->bv_len += len;
 813                         bio->bi_iter.bi_size += len;
 814                         return true;
 815                 }
 816         }
 817         return false;
 818 }
 819 EXPORT_SYMBOL_GPL(__bio_try_merge_page);
 820 
 821 /**
 822  * __bio_add_page - add page(s) to a bio in a new segment
 823  * @bio: destination bio
 824  * @page: start page to add
 825  * @len: length of the data to add, may cross pages
 826  * @off: offset of the data relative to @page, may cross pages
 827  *
 828  * Add the data at @page + @off to @bio as a new bvec.  The caller must
 829  * ensure that @bio has space for another bvec.
 830  */
 831 void __bio_add_page(struct bio *bio, struct page *page,
 832                 unsigned int len, unsigned int off)
 833 {
 834         struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
 835 
 836         WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
 837         WARN_ON_ONCE(bio_full(bio, len));
 838 
 839         bv->bv_page = page;
 840         bv->bv_offset = off;
 841         bv->bv_len = len;
 842 
 843         bio->bi_iter.bi_size += len;
 844         bio->bi_vcnt++;
 845 
 846         if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
 847                 bio_set_flag(bio, BIO_WORKINGSET);
 848 }
 849 EXPORT_SYMBOL_GPL(__bio_add_page);
 850 
 851 /**
 852  * bio_add_page - attempt to add page(s) to bio
 853  * @bio: destination bio
 854  * @page: start page to add
 855  * @len: vec entry length, may cross pages
 856  * @offset: vec entry offset relative to @page, may cross pages
 857  *
 858  * Attempt to add page(s) to the bio_vec maplist. This will only fail
 859  * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 860  */
 861 int bio_add_page(struct bio *bio, struct page *page,
 862                  unsigned int len, unsigned int offset)
 863 {
 864         bool same_page = false;
 865 
 866         if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
 867                 if (bio_full(bio, len))
 868                         return 0;
 869                 __bio_add_page(bio, page, len, offset);
 870         }
 871         return len;
 872 }
 873 EXPORT_SYMBOL(bio_add_page);
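
/*
 * Example (sketch, not verbatim kernel code): filling a bio until it is
 * full.  bio_add_page() returns the number of bytes added, so anything
 * short of the request means the bio cannot take more and should be
 * submitted before continuing.
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE) {
 *			submit_bio(bio);
 *			... allocate a fresh bio and retry pages[i] ...
 *		}
 *	}
 */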
 874 
 875 void bio_release_pages(struct bio *bio, bool mark_dirty)
 876 {
 877         struct bvec_iter_all iter_all;
 878         struct bio_vec *bvec;
 879 
 880         if (bio_flagged(bio, BIO_NO_PAGE_REF))
 881                 return;
 882 
 883         bio_for_each_segment_all(bvec, bio, iter_all) {
 884                 if (mark_dirty && !PageCompound(bvec->bv_page))
 885                         set_page_dirty_lock(bvec->bv_page);
 886                 put_page(bvec->bv_page);
 887         }
 888 }
 889 
 890 static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
 891 {
 892         const struct bio_vec *bv = iter->bvec;
 893         unsigned int len;
 894         size_t size;
 895 
 896         if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
 897                 return -EINVAL;
 898 
 899         len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
 900         size = bio_add_page(bio, bv->bv_page, len,
 901                                 bv->bv_offset + iter->iov_offset);
 902         if (unlikely(size != len))
 903                 return -EINVAL;
 904         iov_iter_advance(iter, size);
 905         return 0;
 906 }
 907 
 908 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 909 
 910 /**
 911  * __bio_iov_iter_get_pages - pin user pages and add them to a bio
 912  * @bio: bio to add pages to
 913  * @iter: iov iterator describing the region to be mapped
 914  *
 915  * Pins pages from *iter and appends them to @bio's bvec array. The
 916  * pages will have to be released using put_page() when done.
 917  * For a multi-segment *iter, this function only adds pages from the
 918  * next non-empty segment of the iov iterator.
 919  */
 920 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 921 {
 922         unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
 923         unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
 924         struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 925         struct page **pages = (struct page **)bv;
 926         bool same_page = false;
 927         ssize_t size, left;
 928         unsigned len, i;
 929         size_t offset;
 930 
 931         /*
 932          * Move the page array up in the allocated memory for the bio vecs
 933          * as far as possible, so that we can start filling biovecs from
 934          * the beginning without overwriting the temporary page array.
 935          */
 936         BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
 937         pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
 938 
 939         size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
 940         if (unlikely(size <= 0))
 941                 return size ? size : -EFAULT;
 942 
 943         for (left = size, i = 0; left > 0; left -= len, i++) {
 944                 struct page *page = pages[i];
 945 
 946                 len = min_t(size_t, PAGE_SIZE - offset, left);
 947 
 948                 if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
 949                         if (same_page)
 950                                 put_page(page);
 951                 } else {
 952                         if (WARN_ON_ONCE(bio_full(bio, len)))
 953                                 return -EINVAL;
 954                         __bio_add_page(bio, page, len, offset);
 955                 }
 956                 offset = 0;
 957         }
 958 
 959         iov_iter_advance(iter, size);
 960         return 0;
 961 }
 962 
 963 /**
 964  * bio_iov_iter_get_pages - add user or kernel pages to a bio
 965  * @bio: bio to add pages to
 966  * @iter: iov iterator describing the region to be added
 967  *
 968  * This takes either an iterator pointing to user memory, or one pointing
 969  * to kernel pages (BVEC iterator). If we're adding user pages, we pin them
 970  * and map them into the kernel. On IO completion, the caller should put
 971  * those pages. If we're adding kernel pages, and the caller told us it's
 972  * safe to do so, we just have to add the pages to the bio directly. We
 973  * don't grab an extra reference to those pages (the user should already
 974  * have that), and we don't put the page on IO completion. The caller needs
 975  * to check if the bio is flagged BIO_NO_PAGE_REF on IO completion. If it
 976  * isn't, then pages should be released.
 977  *
 978  * The function tries, but does not guarantee, to pin as many pages as
 979  * fit into the bio, or are requested in *iter, whatever is smaller. If
 980  * MM encounters an error pinning the requested pages, it stops. Error
 981  * is returned only if 0 pages could be pinned.
 982  */
 983 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 984 {
 985         const bool is_bvec = iov_iter_is_bvec(iter);
 986         int ret;
 987 
 988         if (WARN_ON_ONCE(bio->bi_vcnt))
 989                 return -EINVAL;
 990 
 991         do {
 992                 if (is_bvec)
 993                         ret = __bio_iov_bvec_add_pages(bio, iter);
 994                 else
 995                         ret = __bio_iov_iter_get_pages(bio, iter);
 996         } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
 997 
 998         if (is_bvec)
 999                 bio_set_flag(bio, BIO_NO_PAGE_REF);
1000         return bio->bi_vcnt ? 0 : ret;
1001 }
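
/*
 * Example (a hedged sketch of a direct-IO style caller, not verbatim kernel
 * code): pin the user pages behind an iov_iter into a bio, submit, and
 * release the pages on completion.  bio_release_pages() is a no-op for
 * BIO_NO_PAGE_REF bios, so the bvec (kernel pages) case needs no special
 * handling here.
 *
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret)
 *		return ret;
 *	submit_bio(bio);
 *	...
 *	(in the completion handler:)
 *	bio_release_pages(bio, bio_data_dir(bio) == READ);
 */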
1002 
1003 static void submit_bio_wait_endio(struct bio *bio)
1004 {
1005         complete(bio->bi_private);
1006 }
1007 
1008 /**
1009  * submit_bio_wait - submit a bio, and wait until it completes
1010  * @bio: The &struct bio which describes the I/O
1011  *
1012  * Simple wrapper around submit_bio(). Returns 0 on success, or the error
1013  * from bio_endio() on failure.
1014  *
1015  * WARNING: Unlike how submit_bio() is usually used, this function does
1016  * not result in the bio reference being consumed. The caller must drop
1017  * the reference on his own.
1018  */
1019 int submit_bio_wait(struct bio *bio)
1020 {
1021         DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);
1022 
1023         bio->bi_private = &done;
1024         bio->bi_end_io = submit_bio_wait_endio;
1025         bio->bi_opf |= REQ_SYNC;
1026         submit_bio(bio);
1027         wait_for_completion_io(&done);
1028 
1029         return blk_status_to_errno(bio->bi_status);
1030 }
1031 EXPORT_SYMBOL(submit_bio_wait);
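
/*
 * Example (sketch, not verbatim kernel code): synchronously reading one
 * page.  Note the final bio_put(): submit_bio_wait() does not consume the
 * caller's reference.  'bdev', 'sector' and 'page' are hypothetical.
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */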
1032 
1033 /**
1034  * bio_advance - increment/complete a bio by some number of bytes
1035  * @bio:	bio to advance
1036  * @bytes:	number of bytes to complete
1037  *
1038  * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
1039  * complete doesn't align with a bvec boundary, then bv_len and bv_offset
1040  * will be updated on the last bvec as well.
1041  *
1042  * @bio will then represent the remaining, uncompleted portion of the io.
1043  */
1044 void bio_advance(struct bio *bio, unsigned bytes)
1045 {
1046         if (bio_integrity(bio))
1047                 bio_integrity_advance(bio, bytes);
1048 
1049         bio_advance_iter(bio, &bio->bi_iter, bytes);
1050 }
1051 EXPORT_SYMBOL(bio_advance);
1052 
1053 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1054                         struct bio *src, struct bvec_iter *src_iter)
1055 {
1056         struct bio_vec src_bv, dst_bv;
1057         void *src_p, *dst_p;
1058         unsigned bytes;
1059 
1060         while (src_iter->bi_size && dst_iter->bi_size) {
1061                 src_bv = bio_iter_iovec(src, *src_iter);
1062                 dst_bv = bio_iter_iovec(dst, *dst_iter);
1063 
1064                 bytes = min(src_bv.bv_len, dst_bv.bv_len);
1065 
1066                 src_p = kmap_atomic(src_bv.bv_page);
1067                 dst_p = kmap_atomic(dst_bv.bv_page);
1068 
1069                 memcpy(dst_p + dst_bv.bv_offset,
1070                        src_p + src_bv.bv_offset,
1071                        bytes);
1072 
1073                 kunmap_atomic(dst_p);
1074                 kunmap_atomic(src_p);
1075 
1076                 flush_dcache_page(dst_bv.bv_page);
1077 
1078                 bio_advance_iter(src, src_iter, bytes);
1079                 bio_advance_iter(dst, dst_iter, bytes);
1080         }
1081 }
1082 EXPORT_SYMBOL(bio_copy_data_iter);
1083 
1084 /**
1085  * bio_copy_data - copy contents of data buffers from one bio to another
1086  * @dst: destination bio
1087  * @src: source bio
1088  *
1089  * Stops when it reaches the end of either @src or @dst - that is, copies
1090  * min(src->bi_iter.bi_size, dst->bi_iter.bi_size) bytes.
1091  */
1092 void bio_copy_data(struct bio *dst, struct bio *src)
1093 {
1094         struct bvec_iter src_iter = src->bi_iter;
1095         struct bvec_iter dst_iter = dst->bi_iter;
1096 
1097         bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1098 }
1099 EXPORT_SYMBOL(bio_copy_data);
1100 
1101 /**
1102  * bio_list_copy_data - copy contents of data buffers from one chain of
1103  * bios to another
1104  * @dst: destination bio list
1105  * @src: source bio list
1106  *
1107  * Stops when it reaches the end of either the @src list or @dst list -
1108  * that is, copies min(src->bi_size, dst->bi_size) bytes (or the
1109  * equivalent for lists of bios).
1110  */
1111 void bio_list_copy_data(struct bio *dst, struct bio *src)
1112 {
1113         struct bvec_iter src_iter = src->bi_iter;
1114         struct bvec_iter dst_iter = dst->bi_iter;
1115 
1116         while (1) {
1117                 if (!src_iter.bi_size) {
1118                         src = src->bi_next;
1119                         if (!src)
1120                                 break;
1121 
1122                         src_iter = src->bi_iter;
1123                 }
1124 
1125                 if (!dst_iter.bi_size) {
1126                         dst = dst->bi_next;
1127                         if (!dst)
1128                                 break;
1129 
1130                         dst_iter = dst->bi_iter;
1131                 }
1132 
1133                 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1134         }
1135 }
1136 EXPORT_SYMBOL(bio_list_copy_data);
1137 
1138 struct bio_map_data {
1139         int is_our_pages;
1140         struct iov_iter iter;
1141         struct iovec iov[];
1142 };
1143 
1144 static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
1145                                                gfp_t gfp_mask)
1146 {
1147         struct bio_map_data *bmd;
1148         if (data->nr_segs > UIO_MAXIOV)
1149                 return NULL;
1150 
1151         bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
1152         if (!bmd)
1153                 return NULL;
1154         memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
1155         bmd->iter = *data;
1156         bmd->iter.iov = bmd->iov;
1157         return bmd;
1158 }
1159 
1160 /**
1161  * bio_copy_from_iter - copy all pages from iov_iter to bio
1162  * @bio: The &struct bio which describes the I/O as destination
1163  * @iter: iov_iter as source
1164  *
1165  * Copy all pages from iov_iter to bio.
1166  * Returns 0 on success, or error on failure.
1167  */
1168 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
1169 {
1170         struct bio_vec *bvec;
1171         struct bvec_iter_all iter_all;
1172 
1173         bio_for_each_segment_all(bvec, bio, iter_all) {
1174                 ssize_t ret;
1175 
1176                 ret = copy_page_from_iter(bvec->bv_page,
1177                                           bvec->bv_offset,
1178                                           bvec->bv_len,
1179                                           iter);
1180 
1181                 if (!iov_iter_count(iter))
1182                         break;
1183 
1184                 if (ret < bvec->bv_len)
1185                         return -EFAULT;
1186         }
1187 
1188         return 0;
1189 }
1190 
1191 /**
1192  * bio_copy_to_iter - copy all pages from bio to iov_iter
1193  * @bio: The &struct bio which describes the I/O as source
1194  * @iter: iov_iter as destination
1195  *
1196  * Copy all pages from bio to iov_iter.
1197  * Returns 0 on success, or error on failure.
1198  */
1199 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1200 {
1201         struct bio_vec *bvec;
1202         struct bvec_iter_all iter_all;
1203 
1204         bio_for_each_segment_all(bvec, bio, iter_all) {
1205                 ssize_t ret;
1206 
1207                 ret = copy_page_to_iter(bvec->bv_page,
1208                                         bvec->bv_offset,
1209                                         bvec->bv_len,
1210                                         &iter);
1211 
1212                 if (!iov_iter_count(&iter))
1213                         break;
1214 
1215                 if (ret < bvec->bv_len)
1216                         return -EFAULT;
1217         }
1218 
1219         return 0;
1220 }
1221 
1222 void bio_free_pages(struct bio *bio)
1223 {
1224         struct bio_vec *bvec;
1225         struct bvec_iter_all iter_all;
1226 
1227         bio_for_each_segment_all(bvec, bio, iter_all)
1228                 __free_page(bvec->bv_page);
1229 }
1230 EXPORT_SYMBOL(bio_free_pages);
1231 
1232 /**
1233  * bio_uncopy_user - finish previously mapped bio
1234  * @bio: bio being terminated
1235  *
1236  * Free pages allocated from bio_copy_user_iov() and write back data
1237  * to user space in case of a read.
1238  */
1239 int bio_uncopy_user(struct bio *bio)
1240 {
1241         struct bio_map_data *bmd = bio->bi_private;
1242         int ret = 0;
1243 
1244         if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1245                 /*
1246                  * if we're in a workqueue, the request is orphaned, so
1247                  * don't copy into a random user address space, just free
1248                  * and return -EINTR so user space doesn't expect any data.
1249                  */
1250                 if (!current->mm)
1251                         ret = -EINTR;
1252                 else if (bio_data_dir(bio) == READ)
1253                         ret = bio_copy_to_iter(bio, bmd->iter);
1254                 if (bmd->is_our_pages)
1255                         bio_free_pages(bio);
1256         }
1257         kfree(bmd);
1258         bio_put(bio);
1259         return ret;
1260 }
1261 
1262 /**
1263  * bio_copy_user_iov - copy user data to bio
1264  * @q:		destination block queue
1265  * @map_data:	pointer to the rq_map_data holding pages (if necessary)
1266  * @iter:	iovec iterator
1267  * @gfp_mask:	memory allocation flags
1268  *
1269  * Prepares and returns a bio for indirect user io, bouncing data
1270  * to/from kernel pages as necessary. Must be paired with a call to
1271  * bio_uncopy_user() on io completion.
1272  */
1273 struct bio *bio_copy_user_iov(struct request_queue *q,
1274                               struct rq_map_data *map_data,
1275                               struct iov_iter *iter,
1276                               gfp_t gfp_mask)
1277 {
1278         struct bio_map_data *bmd;
1279         struct page *page;
1280         struct bio *bio;
1281         int i = 0, ret;
1282         int nr_pages;
1283         unsigned int len = iter->count;
1284         unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1285 
1286         bmd = bio_alloc_map_data(iter, gfp_mask);
1287         if (!bmd)
1288                 return ERR_PTR(-ENOMEM);
1289 
1290         /*
1291          * We need to do a deep copy of the iov_iter including the iovecs.
1292          * The caller-provided iov might point to an on-stack or otherwise
1293          * short-lived one.
1294          */
1295         bmd->is_our_pages = map_data ? 0 : 1;
1296 
1297         nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1298         if (nr_pages > BIO_MAX_PAGES)
1299                 nr_pages = BIO_MAX_PAGES;
1300 
1301         ret = -ENOMEM;
1302         bio = bio_kmalloc(gfp_mask, nr_pages);
1303         if (!bio)
1304                 goto out_bmd;
1305 
1306         ret = 0;
1307 
1308         if (map_data) {
1309                 nr_pages = 1 << map_data->page_order;
1310                 i = map_data->offset / PAGE_SIZE;
1311         }
1312         while (len) {
1313                 unsigned int bytes = PAGE_SIZE;
1314 
1315                 bytes -= offset;
1316 
1317                 if (bytes > len)
1318                         bytes = len;
1319 
1320                 if (map_data) {
1321                         if (i == map_data->nr_entries * nr_pages) {
1322                                 ret = -ENOMEM;
1323                                 break;
1324                         }
1325 
1326                         page = map_data->pages[i / nr_pages];
1327                         page += (i % nr_pages);
1328 
1329                         i++;
1330                 } else {
1331                         page = alloc_page(q->bounce_gfp | gfp_mask);
1332                         if (!page) {
1333                                 ret = -ENOMEM;
1334                                 break;
1335                         }
1336                 }
1337 
1338                 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
1339                         if (!map_data)
1340                                 __free_page(page);
1341                         break;
1342                 }
1343 
1344                 len -= bytes;
1345                 offset = 0;
1346         }
1347 
1348         if (ret)
1349                 goto cleanup;
1350 
1351         if (map_data)
1352                 map_data->offset += bio->bi_iter.bi_size;
1353 
1354         /*
1355          * success
1356          */
1357         if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
1358             (map_data && map_data->from_user)) {
1359                 ret = bio_copy_from_iter(bio, iter);
1360                 if (ret)
1361                         goto cleanup;
1362         } else {
1363                 if (bmd->is_our_pages)
1364                         zero_fill_bio(bio);
1365                 iov_iter_advance(iter, bio->bi_iter.bi_size);
1366         }
1367 
1368         bio->bi_private = bmd;
1369         if (map_data && map_data->null_mapped)
1370                 bio_set_flag(bio, BIO_NULL_MAPPED);
1371         return bio;
1372 cleanup:
1373         if (!map_data)
1374                 bio_free_pages(bio);
1375         bio_put(bio);
1376 out_bmd:
1377         kfree(bmd);
1378         return ERR_PTR(ret);
1379 }
1380 
1381 /**
1382  * bio_map_user_iov - map user iovec into bio
1383  * @q:		the struct request_queue for the bio
1384  * @iter:	iovec iterator
1385  * @gfp_mask:	memory allocation flags
1386  *
1387  * Map the user space address into a bio suitable for io to a block
1388  * device. Returns an error pointer in case of error.
1389  */
1390 struct bio *bio_map_user_iov(struct request_queue *q,
1391                              struct iov_iter *iter,
1392                              gfp_t gfp_mask)
1393 {
1394         int j;
1395         struct bio *bio;
1396         int ret;
1397 
1398         if (!iov_iter_count(iter))
1399                 return ERR_PTR(-EINVAL);
1400 
1401         bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
1402         if (!bio)
1403                 return ERR_PTR(-ENOMEM);
1404 
1405         while (iov_iter_count(iter)) {
1406                 struct page **pages;
1407                 ssize_t bytes;
1408                 size_t offs, added = 0;
1409                 int npages;
1410 
1411                 bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
1412                 if (unlikely(bytes <= 0)) {
1413                         ret = bytes ? bytes : -EFAULT;
1414                         goto out_unmap;
1415                 }
1416 
1417                 npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
1418 
1419                 if (unlikely(offs & queue_dma_alignment(q))) {
1420                         ret = -EINVAL;
1421                         j = 0;
1422                 } else {
1423                         for (j = 0; j < npages; j++) {
1424                                 struct page *page = pages[j];
1425                                 unsigned int n = PAGE_SIZE - offs;
1426                                 bool same_page = false;
1427 
1428                                 if (n > bytes)
1429                                         n = bytes;
1430 
1431                                 if (!__bio_add_pc_page(q, bio, page, n, offs,
1432                                                 &same_page)) {
1433                                         if (same_page)
1434                                                 put_page(page);
1435                                         break;
1436                                 }
1437 
1438                                 added += n;
1439                                 bytes -= n;
1440                                 offs = 0;
1441                         }
1442                         iov_iter_advance(iter, added);
1443                 }
1444                 /*
1445                  * release the pages we didn't map into the bio, if any
1446                  */
1447                 while (j < npages)
1448                         put_page(pages[j++]);
1449                 kvfree(pages);
1450                 /* couldn't stuff something into bio? */
1451                 if (bytes)
1452                         break;
1453         }
1454 
1455         bio_set_flag(bio, BIO_USER_MAPPED);
1456 
1457         /*
1458          * subtle -- if bio_map_user_iov() ended up bouncing a bio,
1459          * it would normally disappear when its bi_end_io is run.
1460          * however, we need it for the unmap, so grab an extra
1461          * reference to it
1462          */
1463         bio_get(bio);
1464         return bio;
1465 
1466  out_unmap:
1467         bio_release_pages(bio, false);
1468         bio_put(bio);
1469         return ERR_PTR(ret);
1470 }
1471 
1472 /**
1473  * bio_unmap_user - unmap a bio
1474  * @bio:	the bio being unmapped
1475  *
1476  * Unmap a bio previously mapped by bio_map_user_iov(). Must be called
1477  * from process context. The double bio_put() drops both the extra
1478  * reference taken in bio_map_user_iov() and the final reference.
1479  * bio_unmap_user() may sleep.
1480  */
1481 void bio_unmap_user(struct bio *bio)
1482 {
1483         bio_release_pages(bio, bio_data_dir(bio) == READ);
1484         bio_put(bio);
1485         bio_put(bio);
1486 }
1487 
1488 static void bio_invalidate_vmalloc_pages(struct bio *bio)
1489 {
1490 #ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
1491         if (bio->bi_private && !op_is_write(bio_op(bio))) {
1492                 unsigned long i, len = 0;
1493 
1494                 for (i = 0; i < bio->bi_vcnt; i++)
1495                         len += bio->bi_io_vec[i].bv_len;
1496                 invalidate_kernel_vmap_range(bio->bi_private, len);
1497         }
1498 #endif
1499 }
1500 
1501 static void bio_map_kern_endio(struct bio *bio)
1502 {
1503         bio_invalidate_vmalloc_pages(bio);
1504         bio_put(bio);
1505 }
1506 
1507 /**
1508  * bio_map_kern - map kernel address into bio
1509  * @q: the struct request_queue for the bio
1510  * @data: pointer to buffer to map
1511  * @len: length in bytes
1512  * @gfp_mask: allocation flags for bio allocation
1513  *
1514  * Map the kernel address into a bio suitable for io to a block
1515  * device. Returns an error pointer in case of error.
1516  */
1517 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1518                          gfp_t gfp_mask)
1519 {
1520         unsigned long kaddr = (unsigned long)data;
1521         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1522         unsigned long start = kaddr >> PAGE_SHIFT;
1523         const int nr_pages = end - start;
1524         bool is_vmalloc = is_vmalloc_addr(data);
1525         struct page *page;
1526         int offset, i;
1527         struct bio *bio;
1528 
1529         bio = bio_kmalloc(gfp_mask, nr_pages);
1530         if (!bio)
1531                 return ERR_PTR(-ENOMEM);
1532 
1533         if (is_vmalloc) {
1534                 flush_kernel_vmap_range(data, len);
1535                 bio->bi_private = data;
1536         }
1537 
1538         offset = offset_in_page(kaddr);
1539         for (i = 0; i < nr_pages; i++) {
1540                 unsigned int bytes = PAGE_SIZE - offset;
1541 
1542                 if (len <= 0)
1543                         break;
1544 
1545                 if (bytes > len)
1546                         bytes = len;
1547 
1548                 if (!is_vmalloc)
1549                         page = virt_to_page(data);
1550                 else
1551                         page = vmalloc_to_page(data);
1552                 if (bio_add_pc_page(q, bio, page, bytes,
1553                                     offset) < bytes) {
1554                 /* we don't support partial mappings */
1555                         bio_put(bio);
1556                         return ERR_PTR(-EINVAL);
1557                 }
1558 
1559                 data += bytes;
1560                 len -= bytes;
1561                 offset = 0;
1562         }
1563 
1564         bio->bi_end_io = bio_map_kern_endio;
1565         return bio;
1566 }
1567 
1568 static void bio_copy_kern_endio(struct bio *bio)
1569 {
1570         bio_free_pages(bio);
1571         bio_put(bio);
1572 }
1573 
1574 static void bio_copy_kern_endio_read(struct bio *bio)
1575 {
1576         char *p = bio->bi_private;
1577         struct bio_vec *bvec;
1578         struct bvec_iter_all iter_all;
1579 
1580         bio_for_each_segment_all(bvec, bio, iter_all) {
1581                 memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1582                 p += bvec->bv_len;
1583         }
1584 
1585         bio_copy_kern_endio(bio);
1586 }
1587 
1588 /**
1589  * bio_copy_kern - copy kernel address into bio
1590  * @q: the struct request_queue for the bio
1591  * @data: pointer to buffer to copy
1592  * @len: length in bytes
1593  * @gfp_mask: allocation flags for bio and page allocation
1594  * @reading: data direction is READ
1595  *
1596  * copy the kernel address into a bio suitable for io to a block
1597  * device. Returns an error pointer in case of error.
1598  */
1599 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1600                           gfp_t gfp_mask, int reading)
1601 {
1602         unsigned long kaddr = (unsigned long)data;
1603         unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1604         unsigned long start = kaddr >> PAGE_SHIFT;
1605         struct bio *bio;
1606         void *p = data;
1607         int nr_pages = 0;
1608 
1609         /*
1610          * Overflow, abort
1611          */
1612         if (end < start)
1613                 return ERR_PTR(-EINVAL);
1614 
1615         nr_pages = end - start;
1616         bio = bio_kmalloc(gfp_mask, nr_pages);
1617         if (!bio)
1618                 return ERR_PTR(-ENOMEM);
1619 
1620         while (len) {
1621                 struct page *page;
1622                 unsigned int bytes = PAGE_SIZE;
1623 
1624                 if (bytes > len)
1625                         bytes = len;
1626 
1627                 page = alloc_page(q->bounce_gfp | gfp_mask);
1628                 if (!page)
1629                         goto cleanup;
1630 
1631                 if (!reading)
1632                         memcpy(page_address(page), p, bytes);
1633 
1634                 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1635                         break;
1636 
1637                 len -= bytes;
1638                 p += bytes;
1639         }
1640 
1641         if (reading) {
1642                 bio->bi_end_io = bio_copy_kern_endio_read;
1643                 bio->bi_private = data;
1644         } else {
1645                 bio->bi_end_io = bio_copy_kern_endio;
1646         }
1647 
1648         return bio;
1649 
1650 cleanup:
1651         bio_free_pages(bio);
1652         bio_put(bio);
1653         return ERR_PTR(-ENOMEM);
1654 }
1655 
1656 /*
1657  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1658  * for performing direct-IO in BIOs.
1659  *
1660  * The problem is that we cannot run set_page_dirty() from interrupt context
1661  * because the required locks are not interrupt-safe.  So what we can do is
1662  * to mark the pages dirty _before_ performing IO.  And in interrupt context,
1663  * check that the pages are still dirty.  If so, fine.  If not, redirty them
1664  * in process context.
1665  *
1666  * We special-case compound pages here: normally this means reads into
1667  * hugetlb pages.  The logic here doesn't really work right for compound
1668  * pages because the VM does not uniformly chase down the head page in all
1669  * cases.  But dirtiness of compound pages is pretty meaningless anyway: the
1670  * VM doesn't handle them at all.  So we skip compound pages, both when
1671  * dirtying before IO and when checking afterwards.
1672  *
1673  * It is expected that the pages will usually still be dirty at completion
1674  * time, so the common case takes no locks and defers no work.
1675  *
1676  * bio_set_pages_dirty() marks all the bio's pages as dirty; it is called
1677  * before the IO is started.  bio_check_pages_dirty() runs at IO completion;
1678  * see the comment above bio_dirty_fn() below for how clean pages are
1679  * handed off to process context.
1680  */
1681 
1682 
1683 
1684 
1685 void bio_set_pages_dirty(struct bio *bio)
1686 {
1687         struct bio_vec *bvec;
1688         struct bvec_iter_all iter_all;
1689 
1690         bio_for_each_segment_all(bvec, bio, iter_all) {
1691                 if (!PageCompound(bvec->bv_page))
1692                         set_page_dirty_lock(bvec->bv_page);
1693         }
1694 }
1695 
1696 /*
1697  * bio_check_pages_dirty() checks that all the BIO's pages are still dirty.
1698  * If so, fine: the bio's pages are released and the bio is put right away.
1699  * If, however, some pages were written out (cleaned) during the direct-IO
1700  * read, they cannot be redirtied from interrupt context.
1701  *
1702  * In that case the bio is added to bio_dirty_list (under bio_dirty_lock)
1703  * and bio_dirty_work is scheduled; bio_dirty_fn() then redirties the pages
1704  * and releases the bio from process context.
1705  */
1706 
1707 static void bio_dirty_fn(struct work_struct *work);
1708 
1709 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1710 static DEFINE_SPINLOCK(bio_dirty_lock);
1711 static struct bio *bio_dirty_list;
1712 
1713 /*
1714  * This runs in process context
1715  */
1716 static void bio_dirty_fn(struct work_struct *work)
1717 {
1718         struct bio *bio, *next;
1719 
1720         spin_lock_irq(&bio_dirty_lock);
1721         next = bio_dirty_list;
1722         bio_dirty_list = NULL;
1723         spin_unlock_irq(&bio_dirty_lock);
1724 
1725         while ((bio = next) != NULL) {
1726                 next = bio->bi_private;
1727 
1728                 bio_release_pages(bio, true);
1729                 bio_put(bio);
1730         }
1731 }
1732 
1733 void bio_check_pages_dirty(struct bio *bio)
1734 {
1735         struct bio_vec *bvec;
1736         unsigned long flags;
1737         struct bvec_iter_all iter_all;
1738 
1739         bio_for_each_segment_all(bvec, bio, iter_all) {
1740                 if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1741                         goto defer;
1742         }
1743 
1744         bio_release_pages(bio, false);
1745         bio_put(bio);
1746         return;
1747 defer:
1748         spin_lock_irqsave(&bio_dirty_lock, flags);
1749         bio->bi_private = bio_dirty_list;
1750         bio_dirty_list = bio;
1751         spin_unlock_irqrestore(&bio_dirty_lock, flags);
1752         schedule_work(&bio_dirty_work);
1753 }
1754 
1755 void update_io_ticks(struct hd_struct *part, unsigned long now)
1756 {
1757         unsigned long stamp;
1758 again:
1759         stamp = READ_ONCE(part->stamp);
1760         if (unlikely(stamp != now)) {
1761                 if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
1762                         __part_stat_add(part, io_ticks, 1);
1763                 }
1764         }
1765         if (part->partno) {
1766                 part = &part_to_disk(part)->part0;
1767                 goto again;
1768         }
1769 }
1770 
1771 void generic_start_io_acct(struct request_queue *q, int op,
1772                            unsigned long sectors, struct hd_struct *part)
1773 {
1774         const int sgrp = op_stat_group(op);
1775 
1776         part_stat_lock();
1777 
1778         update_io_ticks(part, jiffies);
1779         part_stat_inc(part, ios[sgrp]);
1780         part_stat_add(part, sectors[sgrp], sectors);
1781         part_inc_in_flight(q, part, op_is_write(op));
1782 
1783         part_stat_unlock();
1784 }
1785 EXPORT_SYMBOL(generic_start_io_acct);
1786 
1787 void generic_end_io_acct(struct request_queue *q, int req_op,
1788                          struct hd_struct *part, unsigned long start_time)
1789 {
1790         unsigned long now = jiffies;
1791         unsigned long duration = now - start_time;
1792         const int sgrp = op_stat_group(req_op);
1793 
1794         part_stat_lock();
1795 
1796         update_io_ticks(part, now);
1797         part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
1798         part_stat_add(part, time_in_queue, duration);
1799         part_dec_in_flight(q, part, op_is_write(req_op));
1800 
1801         part_stat_unlock();
1802 }
1803 EXPORT_SYMBOL(generic_end_io_acct);
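
/*
 * Example (sketch, not verbatim kernel code): how a bio-based driver
 * brackets an I/O with these accounting helpers.  'q' and 'disk' are
 * hypothetical; &disk->part0 is the whole-device partition.
 *
 *	unsigned long start = jiffies;
 *
 *	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &disk->part0);
 *	... perform the I/O ...
 *	generic_end_io_acct(q, bio_op(bio), &disk->part0, start);
 */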
1804 
1805 static inline bool bio_remaining_done(struct bio *bio)
1806 {
1807         /*
1808          * If we're not chaining, then ->__bi_remaining is always 1 and
1809          * we always end io on the first invocation.
1810          */
1811         if (!bio_flagged(bio, BIO_CHAIN))
1812                 return true;
1813 
1814         BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1815 
1816         if (atomic_dec_and_test(&bio->__bi_remaining)) {
1817                 bio_clear_flag(bio, BIO_CHAIN);
1818                 return true;
1819         }
1820 
1821         return false;
1822 }
1823 
1824 /**
1825  * bio_endio - end I/O on a bio
1826  * @bio:	bio
1827  *
1828  * Description:
1829  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1830  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1831  *   bio unless they own it and thus know that it has an end_io function.
1832  *
1833  *   bio_endio() can be called several times on a bio that has been chained
1834  *   using bio_chain().  The ->bi_end_io() function will only be called the
1835  *   last time.  At this point the BLK_TA_COMPLETE tracing event will be
1836  *   generated if BIO_TRACE_COMPLETION is set.
1837  **/
1838 void bio_endio(struct bio *bio)
1839 {
1840 again:
1841         if (!bio_remaining_done(bio))
1842                 return;
1843         if (!bio_integrity_endio(bio))
1844                 return;
1845 
1846         if (bio->bi_disk)
1847                 rq_qos_done_bio(bio->bi_disk->queue, bio);
1848 
1849         /*
1850          * Need to have a real endio function for chained bios, otherwise
1851          * various corner cases will break (like stacking block devices that
1852          * save/restore bi_end_io) - however, we want to avoid unbounded
1853          * recursion and blowing the stack. Tail call optimization would
1854          * handle this, but compiling with frame pointers also disables
1855          * gcc's sibling call optimization.
1856          */
1857         if (bio->bi_end_io == bio_chain_endio) {
1858                 bio = __bio_chain_endio(bio);
1859                 goto again;
1860         }
1861 
1862         if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1863                 trace_block_bio_complete(bio->bi_disk->queue, bio,
1864                                          blk_status_to_errno(bio->bi_status));
1865                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1866         }
1867 
1868         blk_throtl_bio_endio(bio);
1869         /* release cgroup info */
1870         bio_uninit(bio);
1871         if (bio->bi_end_io)
1872                 bio->bi_end_io(bio);
1873 }
1874 EXPORT_SYMBOL(bio_endio);
1875 
1876 /**
1877  * bio_split - split a bio
1878  * @bio:	bio to split
1879  * @sectors:	number of sectors to split from the front of @bio
1880  * @gfp:	gfp mask
1881  * @bs:		bio set to allocate from
1882  *
1883  * Allocates and returns a new bio which represents @sectors from the start
1884  * of @bio, and updates @bio to represent the remaining sectors.
1885  *
1886  * Unless this is a discard request the newly allocated bio will point
1887  * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1888  * @bio is not freed before the split.
1889  */
1890 struct bio *bio_split(struct bio *bio, int sectors,
1891                       gfp_t gfp, struct bio_set *bs)
1892 {
1893         struct bio *split;
1894 
1895         BUG_ON(sectors <= 0);
1896         BUG_ON(sectors >= bio_sectors(bio));
1897 
1898         split = bio_clone_fast(bio, gfp, bs);
1899         if (!split)
1900                 return NULL;
1901 
1902         split->bi_iter.bi_size = sectors << 9;
1903 
1904         if (bio_integrity(split))
1905                 bio_integrity_trim(split);
1906 
1907         bio_advance(bio, split->bi_iter.bi_size);
1908 
1909         if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1910                 bio_set_flag(split, BIO_TRACE_COMPLETION);
1911 
1912         return split;
1913 }
1914 EXPORT_SYMBOL(bio_split);
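
/*
 * Example (a hedged sketch of the pattern used when a bio exceeds a device
 * limit, cf. the block layer's split path; not verbatim kernel code): split
 * off the front, chain it to the remainder, and requeue the remainder.
 *
 *	if (max_sectors < bio_sectors(bio)) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);		('bio' completes after 'split')
 *		generic_make_request(bio);	(requeue the remainder)
 *		bio = split;			(carry on with the front piece)
 *	}
 */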
1915 
1916 /**
1917  * bio_trim - trim a bio
1918  * @bio:	bio to trim
1919  * @offset:	number of sectors to trim from the front of @bio
1920  * @size:	size we want to trim @bio to, in sectors
1921  */
1922 void bio_trim(struct bio *bio, int offset, int size)
1923 {
1924         /* 'bio' is a cloned bio which we need to trim to match
1925          * the given offset and size.
1926          */
1927 
1928         size <<= 9;
1929         if (offset == 0 && size == bio->bi_iter.bi_size)
1930                 return;
1931 
1932         bio_advance(bio, offset << 9);
1933         bio->bi_iter.bi_size = size;
1934 
1935         if (bio_integrity(bio))
1936                 bio_integrity_trim(bio);
1937 
1938 }
1939 EXPORT_SYMBOL_GPL(bio_trim);
1940 
1941 /*
1942  * create memory pools for biovec's in a bio_set.
1943  * use the global biovec slabs created for general use.
1944  */
1945 int biovec_init_pool(mempool_t *pool, int pool_entries)
1946 {
1947         struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1948 
1949         return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1950 }
1951 
1952 /*
1953  * bioset_exit - exit a bioset initialized with bioset_init()
1954  *
1955  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1956  * kzalloc()).
1957  */
1958 void bioset_exit(struct bio_set *bs)
1959 {
1960         if (bs->rescue_workqueue)
1961                 destroy_workqueue(bs->rescue_workqueue);
1962         bs->rescue_workqueue = NULL;
1963 
1964         mempool_exit(&bs->bio_pool);
1965         mempool_exit(&bs->bvec_pool);
1966 
1967         bioset_integrity_free(bs);
1968         if (bs->bio_slab)
1969                 bio_put_slab(bs);
1970         bs->bio_slab = NULL;
1971 }
1972 EXPORT_SYMBOL(bioset_exit);
1973 
1974 /**
1975  * bioset_init - Initialize a bio_set
1976  * @bs:		pool to initialize
1977  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1978  * @front_pad:	Number of bytes to allocate in front of the returned bio
1979  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1980  *		and %BIOSET_NEED_RESCUER
1981  *
1982  * Description:
1983  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1984  *    to ask for a number of bytes to be allocated in front of the bio.
1985  *    Front pad allocation is useful for embedding the bio inside
1986  *    another structure, to avoid allocating extra data to go with the bio.
1987  *    Note that the bio must be embedded at the END of that structure
1988  *    always, or things will break badly.
1989  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be
1990  *    allocated for allocating iovecs.  This pool is not needed e.g. for
1991  *    bio_clone_fast().
1992  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be
1993  *    used to dispatch queued requests when the mempool runs out of space.
1994  */
1995 int bioset_init(struct bio_set *bs,
1996                 unsigned int pool_size,
1997                 unsigned int front_pad,
1998                 int flags)
1999 {
2000         unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
2001 
2002         bs->front_pad = front_pad;
2003 
2004         spin_lock_init(&bs->rescue_lock);
2005         bio_list_init(&bs->rescue_list);
2006         INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
2007 
2008         bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
2009         if (!bs->bio_slab)
2010                 return -ENOMEM;
2011 
2012         if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
2013                 goto bad;
2014 
2015         if ((flags & BIOSET_NEED_BVECS) &&
2016             biovec_init_pool(&bs->bvec_pool, pool_size))
2017                 goto bad;
2018 
2019         if (!(flags & BIOSET_NEED_RESCUER))
2020                 return 0;
2021 
2022         bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
2023         if (!bs->rescue_workqueue)
2024                 goto bad;
2025 
2026         return 0;
2027 bad:
2028         bioset_exit(bs);
2029         return -ENOMEM;
2030 }
2031 EXPORT_SYMBOL(bioset_init);
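
/*
 * Example (sketch, not verbatim kernel code): using front_pad to embed the
 * bio in a driver-private structure.  'struct my_io', 'my_dev' and 'my_bs'
 * are hypothetical; the bio must be the last member, as noted above.
 *
 *	struct my_io {
 *		struct my_dev	*dev;
 *		struct bio	bio;	(must come last)
 *	};
 *
 *	bioset_init(&my_bs, BIO_POOL_SIZE, offsetof(struct my_io, bio),
 *		    BIOSET_NEED_BVECS);
 *	...
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &my_bs);
 *	struct my_io *io = container_of(bio, struct my_io, bio);
 */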
2032 
2033 /*
2034  * Initialize and setup a new bio_set, based on the settings from
2035  * another bio_set.
2036  */
2037 int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
2038 {
2039         int flags;
2040 
2041         flags = 0;
2042         if (src->bvec_pool.min_nr)
2043                 flags |= BIOSET_NEED_BVECS;
2044         if (src->rescue_workqueue)
2045                 flags |= BIOSET_NEED_RESCUER;
2046 
2047         return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
2048 }
2049 EXPORT_SYMBOL(bioset_init_from_src);
2050 
2051 #ifdef CONFIG_BLK_CGROUP
2052 
2053 /**
2054  * bio_disassociate_blkg - puts back the blkg reference if associated
2055  * @bio: target bio
2056  *
2057  * Helper to disassociate the blkg from @bio if a blkg is associated.
2058  */
2059 void bio_disassociate_blkg(struct bio *bio)
2060 {
2061         if (bio->bi_blkg) {
2062                 blkg_put(bio->bi_blkg);
2063                 bio->bi_blkg = NULL;
2064         }
2065 }
2066 EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
2067 
2068 /**
2069  * __bio_associate_blkg - associate a bio with a blkg
2070  * @bio: target bio
2071  * @blkg: the blkg to associate
2072  *
2073  * This tries to associate @bio with the specified @blkg.  Association
2074  * failure is handled by walking up the blkg tree.  Therefore, the blkg
2075  * associated can be anything between @blkg and the root_blkg.  This
2076  * situation only happens when a cgroup is dying and then the remaining
2077  * bios will spill to the closest alive blkg.
2078  *
2079  * A reference will be taken on the @blkg and will be released when @bio
2080  * is freed.
2081  */
2082 static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
2083 {
2084         bio_disassociate_blkg(bio);
2085 
2086         bio->bi_blkg = blkg_tryget_closest(blkg);
2087 }
2088 
2089 /**
2090  * bio_associate_blkg_from_css - associate a bio with a specified css
2091  * @bio: target bio
2092  * @css: target css
2093  *
2094  * Associate @bio with the blkg found by combining the css's blkg and the
2095  * request_queue of the @bio.  This falls back to the queue's root_blkg if
2096  * the association fails with the css.
2097  */
2098 void bio_associate_blkg_from_css(struct bio *bio,
2099                                  struct cgroup_subsys_state *css)
2100 {
2101         struct request_queue *q = bio->bi_disk->queue;
2102         struct blkcg_gq *blkg;
2103 
2104         rcu_read_lock();
2105 
2106         if (!css || !css->parent)
2107                 blkg = q->root_blkg;
2108         else
2109                 blkg = blkg_lookup_create(css_to_blkcg(css), q);
2110 
2111         __bio_associate_blkg(bio, blkg);
2112 
2113         rcu_read_unlock();
2114 }
2115 EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
2116 
2117 #ifdef CONFIG_MEMCG
2118 
2119 /**
2120  * bio_associate_blkg_from_page - associate a bio with the page's blkg
2121  * @bio: target bio
2122  * @page: the page to lookup the blkcg from
2123  *
2124  * Associate @bio with the blkg from @page's owning memcg.  This works
2125  * like every other associate function wrt references.
2126  */
2127 void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
2128 {
2129         struct cgroup_subsys_state *css;
2130 
2131         if (!page->mem_cgroup)
2132                 return;
2133 
2134         rcu_read_lock();
2135 
2136         css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
2137         bio_associate_blkg_from_css(bio, css);
2138 
2139         rcu_read_unlock();
2140 }
2141 #endif /* CONFIG_MEMCG */
2142 
2143 /**
2144  * bio_associate_blkg - associate a bio with a blkg
2145  * @bio: target bio
2146  *
2147  * Associate @bio with the blkg found from the bio's css and request_queue.
2148  * If one is not found, bio_lookup_blkg() creates the blkg.  If a blkg is
2149  * already associated, the css is reused and association redone as the
2150  * request_queue may have changed.
2151  */
2152 void bio_associate_blkg(struct bio *bio)
2153 {
2154         struct cgroup_subsys_state *css;
2155 
2156         rcu_read_lock();
2157 
2158         if (bio->bi_blkg)
2159                 css = &bio_blkcg(bio)->css;
2160         else
2161                 css = blkcg_css();
2162 
2163         bio_associate_blkg_from_css(bio, css);
2164 
2165         rcu_read_unlock();
2166 }
2167 EXPORT_SYMBOL_GPL(bio_associate_blkg);
2168 
2169 /**
2170  * bio_clone_blkg_association - clone blkg association from src to dst bio
2171  * @dst: destination bio
2172  * @src: source bio
2173  */
2174 void bio_clone_blkg_association(struct bio *dst, struct bio *src)
2175 {
2176         rcu_read_lock();
2177 
2178         if (src->bi_blkg)
2179                 __bio_associate_blkg(dst, src->bi_blkg);
2180 
2181         rcu_read_unlock();
2182 }
2183 EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
2184 #endif /* CONFIG_BLK_CGROUP */
2185 
2186 static void __init biovec_init_slabs(void)
2187 {
2188         int i;
2189 
2190         for (i = 0; i < BVEC_POOL_NR; i++) {
2191                 int size;
2192                 struct biovec_slab *bvs = bvec_slabs + i;
2193 
2194                 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2195                         bvs->slab = NULL;
2196                         continue;
2197                 }
2198 
2199                 size = bvs->nr_vecs * sizeof(struct bio_vec);
2200                 bvs->slab = kmem_cache_create(bvs->name, size, 0,
2201                                 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2202         }
2203 }
2204 
2205 static int __init init_bio(void)
2206 {
2207         bio_slab_max = 2;
2208         bio_slab_nr = 0;
2209         bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
2210                             GFP_KERNEL);
2211 
2212         BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
2213 
2214         if (!bio_slabs)
2215                 panic("bio: can't allocate bios\n");
2216 
2217         bio_integrity_init();
2218         biovec_init_slabs();
2219 
2220         if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
2221                 panic("bio: can't allocate bios\n");
2222 
2223         if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
2224                 panic("bio: can't create integrity pool\n");
2225 
2226         return 0;
2227 }
2228 subsys_initcall(init_bio);