Lines Matching refs:bio
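(Cross-reference hits for the identifier 'bio' in block/bio.c, roughly the Linux 4.0/4.1 era: bio_endio() still takes an explicit error argument, request flags live in bi_rw, and chained completions are counted in the atomic bi_remaining field. The indented C sketches interspersed below are illustrations against that API; hypothetical names are called out in each lead-in.)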
73 unsigned int sz = sizeof(struct bio) + extra_size; in bio_find_or_create_slab()
237 static void __bio_free(struct bio *bio) in __bio_free() argument
239 bio_disassociate_task(bio); in __bio_free()
241 if (bio_integrity(bio)) in __bio_free()
242 bio_integrity_free(bio); in __bio_free()
245 static void bio_free(struct bio *bio) in bio_free() argument
247 struct bio_set *bs = bio->bi_pool; in bio_free()
250 __bio_free(bio); in bio_free()
253 if (bio_flagged(bio, BIO_OWNS_VEC)) in bio_free()
254 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); in bio_free()
259 p = bio; in bio_free()
265 kfree(bio); in bio_free()
269 void bio_init(struct bio *bio) in bio_init() argument
271 memset(bio, 0, sizeof(*bio)); in bio_init()
272 bio->bi_flags = 1 << BIO_UPTODATE; in bio_init()
273 atomic_set(&bio->bi_remaining, 1); in bio_init()
274 atomic_set(&bio->bi_cnt, 1); in bio_init()
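bio_init() zeroes the whole structure and takes the initial reference, but it does not give the bio a vec table: a caller embedding a bio (on the stack, or inside a driver-private struct) must wire up bi_io_vec and bi_max_vecs itself. A minimal sketch, assuming a caller-supplied bdev, page and sector; note an on-stack bio must never be bio_put(), since bio_free() would try to kfree() it:

    #include <linux/bio.h>

    static int read_one_page(struct block_device *bdev, struct page *page,
                             sector_t sector)
    {
            struct bio bio;          /* on the stack: no bio_put() on this */
            struct bio_vec bvec;

            bio_init(&bio);          /* zeroes all fields, refcounts become 1 */
            bio.bi_io_vec = &bvec;   /* supply our own one-entry vec table */
            bio.bi_max_vecs = 1;
            bio.bi_bdev = bdev;      /* needed before bio_add_page() */
            bio.bi_iter.bi_sector = sector;
            bio_add_page(&bio, page, PAGE_SIZE, 0);

            return submit_bio_wait(READ, &bio);
    }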
288 void bio_reset(struct bio *bio) in bio_reset() argument
290 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); in bio_reset()
292 __bio_free(bio); in bio_reset()
294 memset(bio, 0, BIO_RESET_BYTES); in bio_reset()
295 bio->bi_flags = flags|(1 << BIO_UPTODATE); in bio_reset()
296 atomic_set(&bio->bi_remaining, 1); in bio_reset()
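bio_reset() keeps only the flags above BIO_RESET_BITS and the allocation-related fields past BIO_RESET_BYTES (bi_max_vecs, bi_cnt, bi_io_vec, bi_pool); bi_bdev, bi_iter, bi_vcnt, bi_end_io and bi_private are all wiped and must be set up again. A hedged sketch reusing one bio for a series of synchronous single-page reads (bdev, pages[] and sectors[] are assumed):

    struct bio *bio = bio_alloc(GFP_KERNEL, 1);

    for (i = 0; i < n; i++) {
            bio_reset(bio);                    /* back to a pristine state */
            bio->bi_bdev = bdev;               /* re-set everything below */
            bio->bi_iter.bi_sector = sectors[i];
            bio_add_page(bio, pages[i], PAGE_SIZE, 0);
            ret = submit_bio_wait(READ, bio);
            if (ret)
                    break;
    }
    bio_put(bio);                              /* bi_pool survives the resets */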
300 static void bio_chain_endio(struct bio *bio, int error) in bio_chain_endio() argument
302 bio_endio(bio->bi_private, error); in bio_chain_endio()
303 bio_put(bio); in bio_chain_endio()
317 void bio_chain(struct bio *bio, struct bio *parent) in bio_chain() argument
319 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
321 bio->bi_private = parent; in bio_chain()
322 bio->bi_end_io = bio_chain_endio; in bio_chain()
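bio_chain() (together with the atomic_inc of parent->bi_remaining just after the lines shown) makes the parent's completion wait for the child's: bio_chain_endio() forwards the child's result into the parent. One pattern this enables is issuing a run of bios and sleeping once at the end, by chaining each bio to its successor, much as blk-lib batches bios in later kernels. A sketch, where make_next_chunk_bio() is a hypothetical constructor and at least one bio is produced:

    struct bio *bio = NULL, *next;

    while ((next = make_next_chunk_bio())) {
            if (bio) {
                    bio_chain(bio, next);   /* 'bio' completes into 'next' */
                    submit_bio(READ, bio);
            }
            bio = next;
    }
    ret = submit_bio_wait(READ, bio);       /* waits for the whole chain */
    bio_put(bio);                           /* earlier bios were put by
                                             * bio_chain_endio() */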
330 struct bio *bio; in bio_alloc_rescue() local
334 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
337 if (!bio) in bio_alloc_rescue()
340 generic_make_request(bio); in bio_alloc_rescue()
347 struct bio *bio; in punt_bios_to_rescuer() local
363 while ((bio = bio_list_pop(current->bio_list))) in punt_bios_to_rescuer()
364 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
410 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) in bio_alloc_bioset()
417 struct bio *bio; in bio_alloc_bioset() local
424 p = kmalloc(sizeof(struct bio) + in bio_alloc_bioset()
471 bio = p + front_pad; in bio_alloc_bioset()
472 bio_init(bio); in bio_alloc_bioset()
485 bio->bi_flags |= 1 << BIO_OWNS_VEC; in bio_alloc_bioset()
487 bvl = bio->bi_inline_vecs; in bio_alloc_bioset()
490 bio->bi_pool = bs; in bio_alloc_bioset()
491 bio->bi_flags |= idx << BIO_POOL_OFFSET; in bio_alloc_bioset()
492 bio->bi_max_vecs = nr_iovecs; in bio_alloc_bioset()
493 bio->bi_io_vec = bvl; in bio_alloc_bioset()
494 return bio; in bio_alloc_bioset()
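Allocating from a caller-owned bio_set is what gives bio_alloc_bioset() its forward-progress guarantee: the set's mempools (plus the rescuer punting in punt_bios_to_rescuer() above) mean a GFP_NOIO allocation may block but won't fail. A stacking driver typically creates its set once at init; a sketch, with my_bio_set a hypothetical driver global (this era's bioset_create() takes a pool size and a front-pad):

    #include <linux/bio.h>

    static struct bio_set *my_bio_set;

    static int __init my_driver_init(void)
    {
            my_bio_set = bioset_create(BIO_POOL_SIZE, 0);  /* no front pad */
            if (!my_bio_set)
                    return -ENOMEM;
            return 0;
    }

    /* in the I/O path; with GFP_NOIO this can sleep but not fail */
    bio = bio_alloc_bioset(GFP_NOIO, nr_pages, my_bio_set);

The matching bioset_free(my_bio_set) belongs in the driver's exit path.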
502 void zero_fill_bio(struct bio *bio) in zero_fill_bio() argument
508 bio_for_each_segment(bv, bio, iter) { in zero_fill_bio()
525 void bio_put(struct bio *bio) in bio_put() argument
527 BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); in bio_put()
532 if (atomic_dec_and_test(&bio->bi_cnt)) in bio_put()
533 bio_free(bio); in bio_put()
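Because the completion path usually owns the final reference, a submitter that wants to inspect the bio after I/O (to test BIO_UPTODATE, say) must take a reference of its own before submitting. A sketch, assuming a bi_end_io handler that signals 'done' and drops the submission reference:

    bio_get(bio);                            /* keep it alive past completion */
    submit_bio(WRITE, bio);
    wait_for_completion(&done);              /* signalled from our endio */
    if (!bio_flagged(bio, BIO_UPTODATE))
            ret = -EIO;
    bio_put(bio);                            /* drop our extra reference */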
537 inline int bio_phys_segments(struct request_queue *q, struct bio *bio) in bio_phys_segments() argument
539 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) in bio_phys_segments()
540 blk_recount_segments(q, bio); in bio_phys_segments()
542 return bio->bi_phys_segments; in bio_phys_segments()
557 void __bio_clone_fast(struct bio *bio, struct bio *bio_src) in __bio_clone_fast() argument
559 BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE); in __bio_clone_fast()
565 bio->bi_bdev = bio_src->bi_bdev; in __bio_clone_fast()
566 bio->bi_flags |= 1 << BIO_CLONED; in __bio_clone_fast()
567 bio->bi_rw = bio_src->bi_rw; in __bio_clone_fast()
568 bio->bi_iter = bio_src->bi_iter; in __bio_clone_fast()
569 bio->bi_io_vec = bio_src->bi_io_vec; in __bio_clone_fast()
581 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) in bio_clone_fast() argument
583 struct bio *b; in bio_clone_fast()
589 __bio_clone_fast(b, bio); in bio_clone_fast()
591 if (bio_integrity(bio)) { in bio_clone_fast()
594 ret = bio_integrity_clone(b, bio, gfp_mask); in bio_clone_fast()
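bio_clone_fast() shares the source's bi_io_vec instead of copying it, which is exactly what a stacking driver wants when it only redirects I/O; the price is that the original must stay alive until the clone completes. A hedged remap sketch (struct my_dev, its bioset 'bs' and lower_bdev are hypothetical):

    #include <linux/bio.h>

    static void my_clone_endio(struct bio *clone, int error)
    {
            struct bio *orig = clone->bi_private;

            bio_put(clone);
            bio_endio(orig, error);          /* complete the original */
    }

    static void my_remap(struct my_dev *d, struct bio *bio)
    {
            struct bio *clone = bio_clone_fast(bio, GFP_NOIO, d->bs);

            clone->bi_private = bio;
            clone->bi_end_io = my_clone_endio;
            clone->bi_bdev = d->lower_bdev;  /* redirect; bi_iter and bi_rw
                                              * were copied by __bio_clone_fast() */
            generic_make_request(clone);
    }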
615 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, in bio_clone_bioset()
620 struct bio *bio; in bio_clone_bioset() local
644 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); in bio_clone_bioset()
645 if (!bio) in bio_clone_bioset()
648 bio->bi_bdev = bio_src->bi_bdev; in bio_clone_bioset()
649 bio->bi_rw = bio_src->bi_rw; in bio_clone_bioset()
650 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; in bio_clone_bioset()
651 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; in bio_clone_bioset()
653 if (bio->bi_rw & REQ_DISCARD) in bio_clone_bioset()
656 if (bio->bi_rw & REQ_WRITE_SAME) { in bio_clone_bioset()
657 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; in bio_clone_bioset()
662 bio->bi_io_vec[bio->bi_vcnt++] = bv; in bio_clone_bioset()
668 ret = bio_integrity_clone(bio, bio_src, gfp_mask); in bio_clone_bioset()
670 bio_put(bio); in bio_clone_bioset()
675 return bio; in bio_clone_bioset()
702 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page in __bio_add_page() argument
712 if (unlikely(bio_flagged(bio, BIO_CLONED))) in __bio_add_page()
715 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) in __bio_add_page()
723 if (bio->bi_vcnt > 0) { in __bio_add_page()
724 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; in __bio_add_page()
737 .bi_bdev = bio->bi_bdev, in __bio_add_page()
738 .bi_sector = bio->bi_iter.bi_sector, in __bio_add_page()
739 .bi_size = bio->bi_iter.bi_size - in __bio_add_page()
741 .bi_rw = bio->bi_rw, in __bio_add_page()
750 bio->bi_iter.bi_size += len; in __bio_add_page()
763 if (bio->bi_vcnt >= bio->bi_max_vecs) in __bio_add_page()
770 bvec = &bio->bi_io_vec[bio->bi_vcnt]; in __bio_add_page()
774 bio->bi_vcnt++; in __bio_add_page()
775 bio->bi_phys_segments++; in __bio_add_page()
776 bio->bi_iter.bi_size += len; in __bio_add_page()
783 while (bio->bi_phys_segments > queue_max_segments(q)) { in __bio_add_page()
789 blk_recount_segments(q, bio); in __bio_add_page()
799 .bi_bdev = bio->bi_bdev, in __bio_add_page()
800 .bi_sector = bio->bi_iter.bi_sector, in __bio_add_page()
801 .bi_size = bio->bi_iter.bi_size - len, in __bio_add_page()
802 .bi_rw = bio->bi_rw, in __bio_add_page()
814 if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec))) in __bio_add_page()
815 bio->bi_flags &= ~(1 << BIO_SEG_VALID); in __bio_add_page()
824 bio->bi_vcnt--; in __bio_add_page()
825 bio->bi_iter.bi_size -= len; in __bio_add_page()
826 blk_recount_segments(q, bio); in __bio_add_page()
845 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, in bio_add_pc_page() argument
848 return __bio_add_page(q, bio, page, len, offset, in bio_add_pc_page()
865 int bio_add_page(struct bio *bio, struct page *page, unsigned int len, in bio_add_page() argument
868 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_add_page()
871 max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); in bio_add_page()
872 if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size) in bio_add_page()
875 return __bio_add_page(q, bio, page, len, offset, max_sectors); in bio_add_page()
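bio_add_page() returns the number of bytes actually added, so a short return means the bio hit a queue limit and is full; builders submit it and continue in a fresh bio. A sketch (bdev, sector and pages[] assumed; a single page that can never be added would spin here, which real code guards against):

    #include <linux/bio.h>

    static void my_write_endio(struct bio *bio, int error)  /* hypothetical */
    {
            bio_put(bio);            /* real code would also record 'error' */
    }

    struct bio *bio = NULL;

    for (i = 0; i < nr_pages; i++) {
            if (!bio) {
                    bio = bio_alloc(GFP_KERNEL,
                                    min_t(int, nr_pages - i, BIO_MAX_PAGES));
                    bio->bi_bdev = bdev;
                    bio->bi_iter.bi_sector = sector + (i << (PAGE_SHIFT - 9));
                    bio->bi_end_io = my_write_endio;
            }
            if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE) {
                    submit_bio(WRITE, bio);  /* full: send it on its way */
                    bio = NULL;
                    i--;                     /* retry this page in a new bio */
            }
    }
    if (bio)
            submit_bio(WRITE, bio);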
884 static void submit_bio_wait_endio(struct bio *bio, int error) in submit_bio_wait_endio() argument
886 struct submit_bio_ret *ret = bio->bi_private; in submit_bio_wait_endio()
900 int submit_bio_wait(int rw, struct bio *bio) in submit_bio_wait() argument
906 bio->bi_private = &ret; in submit_bio_wait()
907 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
908 submit_bio(rw, bio); in submit_bio_wait()
926 void bio_advance(struct bio *bio, unsigned bytes) in bio_advance() argument
928 if (bio_integrity(bio)) in bio_advance()
929 bio_integrity_advance(bio, bytes); in bio_advance()
931 bio_advance_iter(bio, &bio->bi_iter, bytes); in bio_advance()
945 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) in bio_alloc_pages() argument
950 bio_for_each_segment_all(bv, bio, i) { in bio_alloc_pages()
953 while (--bv >= bio->bi_io_vec) in bio_alloc_pages()
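bio_alloc_pages() backs every bvec of an already-shaped bio with a fresh page, unwinding the ones it allocated on failure. A hedged sketch modeled loosely on bcache's bch_data_verify(), assuming 'bio' is a READ whose iterator still describes the range of interest; note bio_free_pages() above is static to bio.c in this era, so the caller frees the pages itself:

    struct bio *check = bio_clone(bio, GFP_NOIO);  /* copies the bvec layout */
    struct bio_vec *bv;
    int i;

    if (!check)
            return;
    if (bio_alloc_pages(check, GFP_NOIO))          /* new page per segment */
            goto out_put;

    submit_bio_wait(READ, check);
    /* ... compare check's pages against bio's pages ... */

    bio_for_each_segment_all(bv, check, i)
            __free_page(bv->bv_page);
    out_put:
    bio_put(check);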
975 void bio_copy_data(struct bio *dst, struct bio *src) in bio_copy_data()
1047 static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter) in bio_copy_from_iter() argument
1052 bio_for_each_segment_all(bvec, bio, i) { in bio_copy_from_iter()
1078 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
1083 bio_for_each_segment_all(bvec, bio, i) { in bio_copy_to_iter()
1101 static void bio_free_pages(struct bio *bio) in bio_free_pages() argument
1106 bio_for_each_segment_all(bvec, bio, i) in bio_free_pages()
1117 int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
1119 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
1122 if (!bio_flagged(bio, BIO_NULL_MAPPED)) { in bio_uncopy_user()
1130 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
1131 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
1133 bio_free_pages(bio); in bio_uncopy_user()
1136 bio_put(bio); in bio_uncopy_user()
1152 struct bio *bio_copy_user_iov(struct request_queue *q, in bio_copy_user_iov()
1159 struct bio *bio; in bio_copy_user_iov() local
1202 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_user_iov()
1203 if (!bio) in bio_copy_user_iov()
1207 bio->bi_rw |= REQ_WRITE; in bio_copy_user_iov()
1241 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) in bio_copy_user_iov()
1256 ret = bio_copy_from_iter(bio, *iter); in bio_copy_user_iov()
1261 bio->bi_private = bmd; in bio_copy_user_iov()
1262 return bio; in bio_copy_user_iov()
1265 bio_free_pages(bio); in bio_copy_user_iov()
1266 bio_put(bio); in bio_copy_user_iov()
1281 struct bio *bio_map_user_iov(struct request_queue *q, in bio_map_user_iov()
1288 struct bio *bio; in bio_map_user_iov() local
1317 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_map_user_iov()
1318 if (!bio) in bio_map_user_iov()
1355 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < in bio_map_user_iov()
1377 bio->bi_rw |= REQ_WRITE; in bio_map_user_iov()
1379 bio->bi_flags |= (1 << BIO_USER_MAPPED); in bio_map_user_iov()
1387 bio_get(bio); in bio_map_user_iov()
1388 return bio; in bio_map_user_iov()
1398 bio_put(bio); in bio_map_user_iov()
1402 static void __bio_unmap_user(struct bio *bio) in __bio_unmap_user() argument
1410 bio_for_each_segment_all(bvec, bio, i) { in __bio_unmap_user()
1411 if (bio_data_dir(bio) == READ) in __bio_unmap_user()
1417 bio_put(bio); in __bio_unmap_user()
1429 void bio_unmap_user(struct bio *bio) in bio_unmap_user() argument
1431 __bio_unmap_user(bio); in bio_unmap_user()
1432 bio_put(bio); in bio_unmap_user()
1436 static void bio_map_kern_endio(struct bio *bio, int err) in bio_map_kern_endio() argument
1438 bio_put(bio); in bio_map_kern_endio()
1451 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, in bio_map_kern()
1459 struct bio *bio; in bio_map_kern() local
1461 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_map_kern()
1462 if (!bio) in bio_map_kern()
1475 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, in bio_map_kern()
1478 bio_put(bio); in bio_map_kern()
1487 bio->bi_end_io = bio_map_kern_endio; in bio_map_kern()
1488 return bio; in bio_map_kern()
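Unlike the allocators above, bio_map_kern() reports failure as ERR_PTR(-EINVAL) or ERR_PTR(-ENOMEM) rather than NULL, and because it maps with virt_to_page() the buffer must be linearly mapped (no vmalloc or stack memory) and stay valid until completion. A sketch of a synchronous write of a kernel buffer (q, bdev, buf, len and sector assumed):

    struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);

    if (IS_ERR(bio))
            return PTR_ERR(bio);

    bio->bi_bdev = bdev;                  /* bio_map_kern() doesn't set this */
    bio->bi_iter.bi_sector = sector;
    ret = submit_bio_wait(WRITE, bio);    /* replaces the default endio, so */
    bio_put(bio);                         /* we drop the reference ourselves */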
1492 static void bio_copy_kern_endio(struct bio *bio, int err) in bio_copy_kern_endio() argument
1494 bio_free_pages(bio); in bio_copy_kern_endio()
1495 bio_put(bio); in bio_copy_kern_endio()
1498 static void bio_copy_kern_endio_read(struct bio *bio, int err) in bio_copy_kern_endio_read() argument
1500 char *p = bio->bi_private; in bio_copy_kern_endio_read()
1504 bio_for_each_segment_all(bvec, bio, i) { in bio_copy_kern_endio_read()
1509 bio_copy_kern_endio(bio, err); in bio_copy_kern_endio_read()
1523 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, in bio_copy_kern()
1529 struct bio *bio; in bio_copy_kern() local
1540 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_kern()
1541 if (!bio) in bio_copy_kern()
1558 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) in bio_copy_kern()
1566 bio->bi_end_io = bio_copy_kern_endio_read; in bio_copy_kern()
1567 bio->bi_private = data; in bio_copy_kern()
1569 bio->bi_end_io = bio_copy_kern_endio; in bio_copy_kern()
1570 bio->bi_rw |= REQ_WRITE; in bio_copy_kern()
1573 return bio; in bio_copy_kern()
1576 bio_free_pages(bio); in bio_copy_kern()
1577 bio_put(bio); in bio_copy_kern()
1611 void bio_set_pages_dirty(struct bio *bio) in bio_set_pages_dirty() argument
1616 bio_for_each_segment_all(bvec, bio, i) { in bio_set_pages_dirty()
1624 static void bio_release_pages(struct bio *bio) in bio_release_pages() argument
1629 bio_for_each_segment_all(bvec, bio, i) { in bio_release_pages()
1652 static struct bio *bio_dirty_list;
1660 struct bio *bio; in bio_dirty_fn() local
1663 bio = bio_dirty_list; in bio_dirty_fn()
1667 while (bio) { in bio_dirty_fn()
1668 struct bio *next = bio->bi_private; in bio_dirty_fn()
1670 bio_set_pages_dirty(bio); in bio_dirty_fn()
1671 bio_release_pages(bio); in bio_dirty_fn()
1672 bio_put(bio); in bio_dirty_fn()
1673 bio = next; in bio_dirty_fn()
1677 void bio_check_pages_dirty(struct bio *bio) in bio_check_pages_dirty() argument
1683 bio_for_each_segment_all(bvec, bio, i) { in bio_check_pages_dirty()
1698 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
1699 bio_dirty_list = bio; in bio_check_pages_dirty()
1703 bio_put(bio); in bio_check_pages_dirty()
1736 void bio_flush_dcache_pages(struct bio *bi) in bio_flush_dcache_pages()
1761 void bio_endio(struct bio *bio, int error) in bio_endio() argument
1763 while (bio) { in bio_endio()
1764 BUG_ON(atomic_read(&bio->bi_remaining) <= 0); in bio_endio()
1767 clear_bit(BIO_UPTODATE, &bio->bi_flags); in bio_endio()
1768 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) in bio_endio()
1771 if (!atomic_dec_and_test(&bio->bi_remaining)) in bio_endio()
1782 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1783 struct bio *parent = bio->bi_private; in bio_endio()
1784 bio_put(bio); in bio_endio()
1785 bio = parent; in bio_endio()
1787 if (bio->bi_end_io) in bio_endio()
1788 bio->bi_end_io(bio, error); in bio_endio()
1789 bio = NULL; in bio_endio()
1803 void bio_endio_nodec(struct bio *bio, int error) in bio_endio_nodec() argument
1805 atomic_inc(&bio->bi_remaining); in bio_endio_nodec()
1806 bio_endio(bio, error); in bio_endio_nodec()
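Note how bio_endio() loops rather than recursing when it meets a bio_chain_endio() parent, keeping arbitrarily long completion chains off the kernel stack. In this era a driver's handler receives the error directly as its second argument; a minimal handler sketch (struct my_io is hypothetical):

    #include <linux/bio.h>
    #include <linux/completion.h>

    struct my_io {
            struct completion done;
            int error;
    };

    static void my_endio(struct bio *bio, int error)
    {
            struct my_io *io = bio->bi_private;

            io->error = error;           /* 0 on success, -EIO etc. on failure */
            complete(&io->done);
            bio_put(bio);                /* drop the submission reference */
    }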
1824 struct bio *bio_split(struct bio *bio, int sectors, in bio_split() argument
1827 struct bio *split = NULL; in bio_split()
1830 BUG_ON(sectors >= bio_sectors(bio)); in bio_split()
1836 if (bio->bi_rw & REQ_DISCARD) in bio_split()
1837 split = bio_clone_bioset(bio, gfp, bs); in bio_split()
1839 split = bio_clone_fast(bio, gfp, bs); in bio_split()
1849 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
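bio_split() carves the first 'sectors' off into a clone (a full clone for discards, which carry no data pages, and a fast clone otherwise) and advances the original past them. Pairing it with bio_chain() is the standard way to break bios on internal boundaries, as md/raid0 does; a sketch, with sectors_to_boundary() and target_bdev() hypothetical:

    struct bio *split;

    do {
            unsigned sectors = sectors_to_boundary(bio);

            if (sectors < bio_sectors(bio)) {
                    split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
                    bio_chain(split, bio);  /* parent finishes after all pieces */
            } else {
                    split = bio;            /* last piece: the bio itself */
            }

            split->bi_bdev = target_bdev(split);
            generic_make_request(split);
    } while (split != bio);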
1861 void bio_trim(struct bio *bio, int offset, int size) in bio_trim() argument
1868 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1871 clear_bit(BIO_SEG_VALID, &bio->bi_flags); in bio_trim()
1873 bio_advance(bio, offset << 9); in bio_trim()
1875 bio->bi_iter.bi_size = size; in bio_trim()
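bio_trim() narrows a bio to a sector subrange: it advances past 'offset' sectors and caps bi_size at 'size' sectors (both arguments are in sectors, hence the << 9 shifts). It is normally applied to a clone so the original stays intact; a sketch, with the bioset 'bs' assumed:

    struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

    bio_trim(clone, offset, nr_sectors);     /* both arguments in sectors */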
1999 int bio_associate_current(struct bio *bio) in bio_associate_current() argument
2004 if (bio->bi_ioc) in bio_associate_current()
2013 bio->bi_ioc = ioc; in bio_associate_current()
2019 bio->bi_css = css; in bio_associate_current()
2029 void bio_disassociate_task(struct bio *bio) in bio_disassociate_task() argument
2031 if (bio->bi_ioc) { in bio_disassociate_task()
2032 put_io_context(bio->bi_ioc); in bio_disassociate_task()
2033 bio->bi_ioc = NULL; in bio_disassociate_task()
2035 if (bio->bi_css) { in bio_disassociate_task()
2036 css_put(bio->bi_css); in bio_disassociate_task()
2037 bio->bi_css = NULL; in bio_disassociate_task()
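bio_associate_current() pins the submitting task's io_context and blkcg css to the bio, so throttling and accounting still see the right owner when the bio is actually issued later from another thread; __bio_free() undoes the association through bio_disassociate_task() above. A hedged sketch of a defer-to-worker path (struct my_dev and its fields are hypothetical):

    #include <linux/bio.h>
    #include <linux/workqueue.h>

    static void my_defer_bio(struct my_dev *d, struct bio *bio)
    {
            bio_associate_current(bio);      /* -EBUSY if already associated */

            spin_lock(&d->deferred_lock);
            bio_list_add(&d->deferred, bio);
            spin_unlock(&d->deferred_lock);

            queue_work(d->wq, &d->worker);   /* worker submits &d->deferred */
    }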