Lines matching refs:bio. Each entry below gives the line number in the source file, the matching source line, and the enclosing function; trailing notes such as "argument" and "local" record the role of the identifier on that line.
73 unsigned int sz = sizeof(struct bio) + extra_size; in bio_find_or_create_slab()
237 static void __bio_free(struct bio *bio) in __bio_free() argument
239 bio_disassociate_task(bio); in __bio_free()
241 if (bio_integrity(bio)) in __bio_free()
242 bio_integrity_free(bio); in __bio_free()
245 static void bio_free(struct bio *bio) in bio_free() argument
247 struct bio_set *bs = bio->bi_pool; in bio_free()
250 __bio_free(bio); in bio_free()
253 if (bio_flagged(bio, BIO_OWNS_VEC)) in bio_free()
254 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); in bio_free()
259 p = bio; in bio_free()
265 kfree(bio); in bio_free()
269 void bio_init(struct bio *bio) in bio_init() argument
271 memset(bio, 0, sizeof(*bio)); in bio_init()
272 atomic_set(&bio->__bi_remaining, 1); in bio_init()
273 atomic_set(&bio->__bi_cnt, 1); in bio_init()
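bio_init() zeroes the bio and primes both __bi_remaining and __bi_cnt to 1, which is what makes short-lived, caller-owned bios possible without any pool behind them. A hedged sketch of the classic on-stack pattern from this era, where bio_init() takes only the bio and the caller wires up the vector table by hand (bdev, sector and page are assumed to exist in scope):

    /*
     * Sketch: caller-owned bio on the stack. bi_pool stays NULL, so the
     * final bio_put() would kfree() it; a stack bio must therefore be
     * completed synchronously and never have its last reference dropped.
     */
    struct bio bio;
    struct bio_vec bvec;

    bio_init(&bio);
    bio.bi_io_vec = &bvec;              /* single caller-owned biovec */
    bio.bi_max_vecs = 1;
    bio.bi_bdev = bdev;                 /* assumed target device */
    bio.bi_iter.bi_sector = sector;     /* assumed start sector */
    bio_add_page(&bio, page, PAGE_SIZE, 0);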
287 void bio_reset(struct bio *bio) in bio_reset() argument
289 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); in bio_reset()
291 __bio_free(bio); in bio_reset()
293 memset(bio, 0, BIO_RESET_BYTES); in bio_reset()
294 bio->bi_flags = flags; in bio_reset()
295 atomic_set(&bio->__bi_remaining, 1); in bio_reset()
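bio_reset() tears down per-I/O state through __bio_free(), zeroes everything below BIO_RESET_BYTES, and preserves the structure's tail (bi_max_vecs, bi_io_vec, bi_pool) plus the flags at or above BIO_RESET_BITS, so a bio embedded in a longer-lived structure can be resubmitted without reallocation. A sketch under that assumption; struct my_dev, my_endio and the fields touched are hypothetical:

    /*
     * Sketch: recycle an embedded bio between submissions. The device,
     * sector, endio handler and bi_vcnt are all wiped by bio_reset()
     * and must be reinstated; the vector table itself survives.
     */
    static void resubmit(struct my_dev *dev, sector_t sector)
    {
            struct bio *bio = &dev->bio;    /* embedded in driver state */

            bio_reset(bio);
            bio->bi_bdev = dev->bdev;
            bio->bi_iter.bi_sector = sector;
            bio->bi_end_io = my_endio;
            bio_add_page(bio, dev->page, PAGE_SIZE, 0);
            submit_bio(WRITE, bio);
    }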
299 static void bio_chain_endio(struct bio *bio) in bio_chain_endio() argument
301 struct bio *parent = bio->bi_private; in bio_chain_endio()
303 parent->bi_error = bio->bi_error; in bio_chain_endio()
305 bio_put(bio); in bio_chain_endio()
312 static inline void bio_inc_remaining(struct bio *bio) in bio_inc_remaining() argument
314 bio_set_flag(bio, BIO_CHAIN); in bio_inc_remaining()
316 atomic_inc(&bio->__bi_remaining); in bio_inc_remaining()
330 void bio_chain(struct bio *bio, struct bio *parent) in bio_chain() argument
332 BUG_ON(bio->bi_private || bio->bi_end_io); in bio_chain()
334 bio->bi_private = parent; in bio_chain()
335 bio->bi_end_io = bio_chain_endio; in bio_chain()
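bio_chain() is the asynchronous glue between two bios: the child's bi_end_io becomes bio_chain_endio(), which propagates bi_error to the parent and drops the child, while bio_inc_remaining() sets BIO_CHAIN on the parent and bumps __bi_remaining so the parent's own completion is held back until the child finishes. A minimal sketch mirroring blk_queue_split()'s usage; mid_sectors and bs are assumed:

    /*
     * Sketch: carve the front off an oversized bio. After bio_chain(),
     * completing @bio implies @child has already completed too.
     */
    struct bio *child = bio_split(bio, mid_sectors, GFP_NOIO, bs);

    bio_chain(child, bio);              /* bio now waits on child */
    generic_make_request(child);
    generic_make_request(bio);          /* the remainder */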
343 struct bio *bio; in bio_alloc_rescue() local
347 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
350 if (!bio) in bio_alloc_rescue()
353 generic_make_request(bio); in bio_alloc_rescue()
360 struct bio *bio; in punt_bios_to_rescuer() local
376 while ((bio = bio_list_pop(current->bio_list))) in punt_bios_to_rescuer()
377 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
423 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) in bio_alloc_bioset()
430 struct bio *bio; in bio_alloc_bioset() local
437 p = kmalloc(sizeof(struct bio) + in bio_alloc_bioset()
484 bio = p + front_pad; in bio_alloc_bioset()
485 bio_init(bio); in bio_alloc_bioset()
498 bio_set_flag(bio, BIO_OWNS_VEC); in bio_alloc_bioset()
500 bvl = bio->bi_inline_vecs; in bio_alloc_bioset()
503 bio->bi_pool = bs; in bio_alloc_bioset()
504 bio->bi_flags |= idx << BIO_POOL_OFFSET; in bio_alloc_bioset()
505 bio->bi_max_vecs = nr_iovecs; in bio_alloc_bioset()
506 bio->bi_io_vec = bvl; in bio_alloc_bioset()
507 return bio; in bio_alloc_bioset()
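bio_alloc_bioset() has two paths: with bs == NULL it kmallocs a bio with inline vecs, and with a bio_set it carves the bio out of the set's mempool (punting any bios held on current->bio_list to the rescuer first so the pool cannot deadlock), stashes the bvec-pool index in the flags, and records the owning set in bi_pool so bio_free() can return everything. A hedged sketch of setting up and using a private bio_set; the pool size and names are arbitrary:

    /*
     * Sketch: a private bio_set gives a driver forward-progress
     * guarantees; GFP_NOIO allocations from it can fall back to the
     * mempool reserve instead of failing under memory pressure.
     */
    #include <linux/bio.h>

    static struct bio_set *my_bs;

    static int __init my_init(void)
    {
            my_bs = bioset_create(BIO_POOL_SIZE, 0);  /* reserve, front_pad */
            return my_bs ? 0 : -ENOMEM;
    }

    static void __exit my_exit(void)
    {
            bioset_free(my_bs);
    }

    static struct bio *my_alloc(unsigned int nr_vecs)
    {
            return bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bs);
    }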
515 void zero_fill_bio(struct bio *bio) in zero_fill_bio() argument
521 bio_for_each_segment(bv, bio, iter) { in zero_fill_bio()
538 void bio_put(struct bio *bio) in bio_put() argument
540 if (!bio_flagged(bio, BIO_REFFED)) in bio_put()
541 bio_free(bio); in bio_put()
543 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt)); in bio_put()
548 if (atomic_dec_and_test(&bio->__bi_cnt)) in bio_put()
549 bio_free(bio); in bio_put()
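bio_put() has a fast path: when BIO_REFFED is clear the bio has never been bio_get()'d, so there is exactly one owner and bio_free() is called without touching __bi_cnt; bio_get() sets BIO_REFFED, after which the real refcount is honored. A minimal sketch of holding a bio across submission; the completion is assumed to be signalled from the bio's endio handler:

    /*
     * Sketch: take an extra reference so the bio can be examined after
     * the I/O completes rather than being freed by the completion path.
     */
    bio_get(bio);                       /* sets BIO_REFFED, __bi_cnt = 2 */
    submit_bio(READ, bio);
    wait_for_completion(&done);         /* assumed: complete()d in endio */
    if (bio->bi_error)
            pr_err("I/O error %d\n", bio->bi_error);
    bio_put(bio);                       /* drop our extra reference */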
554 inline int bio_phys_segments(struct request_queue *q, struct bio *bio) in bio_phys_segments() argument
556 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) in bio_phys_segments()
557 blk_recount_segments(q, bio); in bio_phys_segments()
559 return bio->bi_phys_segments; in bio_phys_segments()
574 void __bio_clone_fast(struct bio *bio, struct bio *bio_src) in __bio_clone_fast() argument
576 BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE); in __bio_clone_fast()
582 bio->bi_bdev = bio_src->bi_bdev; in __bio_clone_fast()
583 bio_set_flag(bio, BIO_CLONED); in __bio_clone_fast()
584 bio->bi_rw = bio_src->bi_rw; in __bio_clone_fast()
585 bio->bi_iter = bio_src->bi_iter; in __bio_clone_fast()
586 bio->bi_io_vec = bio_src->bi_io_vec; in __bio_clone_fast()
598 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) in bio_clone_fast() argument
600 struct bio *b; in bio_clone_fast()
606 __bio_clone_fast(b, bio); in bio_clone_fast()
608 if (bio_integrity(bio)) { in bio_clone_fast()
611 ret = bio_integrity_clone(b, bio, gfp_mask); in bio_clone_fast()
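bio_clone_fast() builds a shallow clone for stacking drivers: __bio_clone_fast() copies bi_bdev, bi_rw and the iterator but shares bi_io_vec with the source (hence BIO_CLONED on the clone), and any integrity payload is cloned afterwards. A hedged sketch of the usual remap-and-resubmit pattern; struct my_target, clone_endio and the remapping fields are hypothetical:

    /*
     * Sketch: stacking driver redirecting I/O at a backing device. The
     * clone shares the source's biovec, so the source must outlive it.
     */
    static void remap_and_submit(struct my_target *t, struct bio *bio)
    {
            struct bio *clone = bio_clone_fast(bio, GFP_NOIO, t->bs);

            clone->bi_bdev = t->backing_bdev;             /* redirect */
            clone->bi_iter.bi_sector += t->start_sector;  /* remap */
            clone->bi_private = bio;
            clone->bi_end_io = clone_endio;   /* must end @bio as well */
            generic_make_request(clone);
    }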
632 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, in bio_clone_bioset()
637 struct bio *bio; in bio_clone_bioset() local
661 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); in bio_clone_bioset()
662 if (!bio) in bio_clone_bioset()
665 bio->bi_bdev = bio_src->bi_bdev; in bio_clone_bioset()
666 bio->bi_rw = bio_src->bi_rw; in bio_clone_bioset()
667 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; in bio_clone_bioset()
668 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; in bio_clone_bioset()
670 if (bio->bi_rw & REQ_DISCARD) in bio_clone_bioset()
673 if (bio->bi_rw & REQ_WRITE_SAME) { in bio_clone_bioset()
674 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; in bio_clone_bioset()
679 bio->bi_io_vec[bio->bi_vcnt++] = bv; in bio_clone_bioset()
685 ret = bio_integrity_clone(bio, bio_src, gfp_mask); in bio_clone_bioset()
687 bio_put(bio); in bio_clone_bioset()
692 return bio; in bio_clone_bioset()
711 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page in bio_add_pc_page() argument
720 if (unlikely(bio_flagged(bio, BIO_CLONED))) in bio_add_pc_page()
723 if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q)) in bio_add_pc_page()
731 if (bio->bi_vcnt > 0) { in bio_add_pc_page()
732 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_add_pc_page()
737 bio->bi_iter.bi_size += len; in bio_add_pc_page()
749 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_add_pc_page()
756 bvec = &bio->bi_io_vec[bio->bi_vcnt]; in bio_add_pc_page()
760 bio->bi_vcnt++; in bio_add_pc_page()
761 bio->bi_phys_segments++; in bio_add_pc_page()
762 bio->bi_iter.bi_size += len; in bio_add_pc_page()
769 while (bio->bi_phys_segments > queue_max_segments(q)) { in bio_add_pc_page()
775 blk_recount_segments(q, bio); in bio_add_pc_page()
779 if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec))) in bio_add_pc_page()
780 bio_clear_flag(bio, BIO_SEG_VALID); in bio_add_pc_page()
789 bio->bi_vcnt--; in bio_add_pc_page()
790 bio->bi_iter.bi_size -= len; in bio_add_pc_page()
791 blk_recount_segments(q, bio); in bio_add_pc_page()
806 int bio_add_page(struct bio *bio, struct page *page, in bio_add_page() argument
814 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) in bio_add_page()
822 if (bio->bi_vcnt > 0) { in bio_add_page()
823 bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; in bio_add_page()
832 if (bio->bi_vcnt >= bio->bi_max_vecs) in bio_add_page()
835 bv = &bio->bi_io_vec[bio->bi_vcnt]; in bio_add_page()
840 bio->bi_vcnt++; in bio_add_page()
842 bio->bi_iter.bi_size += len; in bio_add_page()
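bio_add_page() first tries to extend the last biovec when the new range continues the same page at a consecutive offset (the small-blocksize filesystem case), and otherwise appends a fresh biovec until bi_max_vecs runs out; unlike bio_add_pc_page() it enforces no queue limits, since oversized bios are split when they enter the block layer. A minimal sketch of filling a bio from a page array; pages[], npages (at most BIO_MAX_PAGES), bdev and sector are assumed:

    /*
     * Sketch: build a bio over an array of pages. bio_add_page()
     * returns the number of bytes added; less than requested means
     * the vector table is full and a new bio is needed.
     */
    struct bio *bio = bio_alloc(GFP_KERNEL, npages);
    int i;

    bio->bi_bdev = bdev;
    bio->bi_iter.bi_sector = sector;
    for (i = 0; i < npages; i++)
            if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
                    break;              /* full: submit, start another */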
852 static void submit_bio_wait_endio(struct bio *bio) in submit_bio_wait_endio() argument
854 struct submit_bio_ret *ret = bio->bi_private; in submit_bio_wait_endio()
856 ret->error = bio->bi_error; in submit_bio_wait_endio()
868 int submit_bio_wait(int rw, struct bio *bio) in submit_bio_wait() argument
874 bio->bi_private = &ret; in submit_bio_wait()
875 bio->bi_end_io = submit_bio_wait_endio; in submit_bio_wait()
876 submit_bio(rw, bio); in submit_bio_wait()
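submit_bio_wait() points bi_private at an on-stack struct submit_bio_ret holding a completion, installs submit_bio_wait_endio(), submits, sleeps, and hands back bi_error; in this era the rw flags are still passed alongside the bio. A minimal sketch of a synchronous single-page read; bdev, sector and page are assumed:

    /* Sketch: synchronous one-page read, pre-4.8 calling convention. */
    struct bio *bio = bio_alloc(GFP_KERNEL, 1);
    int err;

    bio->bi_bdev = bdev;
    bio->bi_iter.bi_sector = sector;
    bio_add_page(bio, page, PAGE_SIZE, 0);

    err = submit_bio_wait(READ, bio);   /* returns bio->bi_error */
    bio_put(bio);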
894 void bio_advance(struct bio *bio, unsigned bytes) in bio_advance() argument
896 if (bio_integrity(bio)) in bio_advance()
897 bio_integrity_advance(bio, bytes); in bio_advance()
899 bio_advance_iter(bio, &bio->bi_iter, bytes); in bio_advance()
913 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) in bio_alloc_pages() argument
918 bio_for_each_segment_all(bv, bio, i) { in bio_alloc_pages()
921 while (--bv >= bio->bi_io_vec) in bio_alloc_pages()
943 void bio_copy_data(struct bio *dst, struct bio *src) in bio_copy_data()
1015 static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter) in bio_copy_from_iter() argument
1020 bio_for_each_segment_all(bvec, bio, i) { in bio_copy_from_iter()
1046 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
1051 bio_for_each_segment_all(bvec, bio, i) { in bio_copy_to_iter()
1069 static void bio_free_pages(struct bio *bio) in bio_free_pages() argument
1074 bio_for_each_segment_all(bvec, bio, i) in bio_free_pages()
1085 int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
1087 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
1090 if (!bio_flagged(bio, BIO_NULL_MAPPED)) { in bio_uncopy_user()
1098 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
1099 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
1101 bio_free_pages(bio); in bio_uncopy_user()
1104 bio_put(bio); in bio_uncopy_user()
1120 struct bio *bio_copy_user_iov(struct request_queue *q, in bio_copy_user_iov()
1127 struct bio *bio; in bio_copy_user_iov() local
1170 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_user_iov()
1171 if (!bio) in bio_copy_user_iov()
1175 bio->bi_rw |= REQ_WRITE; in bio_copy_user_iov()
1209 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) in bio_copy_user_iov()
1224 ret = bio_copy_from_iter(bio, *iter); in bio_copy_user_iov()
1229 bio->bi_private = bmd; in bio_copy_user_iov()
1230 return bio; in bio_copy_user_iov()
1233 bio_free_pages(bio); in bio_copy_user_iov()
1234 bio_put(bio); in bio_copy_user_iov()
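bio_copy_user_iov() bounces user memory through freshly allocated pages: it stores the iov state in a bio_map_data hung off bi_private, copies the user data in up front for writes, and leaves the read-side copy-back to bio_uncopy_user(), which also frees the bounce pages and drops the mapping reference. The pair is normally driven by blk_rq_map_user(); a hedged sketch of the contract, with q and iter assumed to be set up:

    /*
     * Sketch of the map/uncopy contract (error paths abbreviated).
     * NULL here is the optional struct rq_map_data.
     */
    struct bio *bio = bio_copy_user_iov(q, NULL, &iter, GFP_KERNEL);
    if (IS_ERR(bio))
            return PTR_ERR(bio);
    /* ... issue the I/O and wait for completion ... */
    ret = bio_uncopy_user(bio);   /* copies back to the iov on reads */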
1249 struct bio *bio_map_user_iov(struct request_queue *q, in bio_map_user_iov()
1256 struct bio *bio; in bio_map_user_iov() local
1285 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_map_user_iov()
1286 if (!bio) in bio_map_user_iov()
1323 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < in bio_map_user_iov()
1345 bio->bi_rw |= REQ_WRITE; in bio_map_user_iov()
1347 bio_set_flag(bio, BIO_USER_MAPPED); in bio_map_user_iov()
1355 bio_get(bio); in bio_map_user_iov()
1356 return bio; in bio_map_user_iov()
1366 bio_put(bio); in bio_map_user_iov()
1370 static void __bio_unmap_user(struct bio *bio) in __bio_unmap_user() argument
1378 bio_for_each_segment_all(bvec, bio, i) { in __bio_unmap_user()
1379 if (bio_data_dir(bio) == READ) in __bio_unmap_user()
1385 bio_put(bio); in __bio_unmap_user()
1397 void bio_unmap_user(struct bio *bio) in bio_unmap_user() argument
1399 __bio_unmap_user(bio); in bio_unmap_user()
1400 bio_put(bio); in bio_unmap_user()
1404 static void bio_map_kern_endio(struct bio *bio) in bio_map_kern_endio() argument
1406 bio_put(bio); in bio_map_kern_endio()
1419 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, in bio_map_kern()
1427 struct bio *bio; in bio_map_kern() local
1429 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_map_kern()
1430 if (!bio) in bio_map_kern()
1443 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, in bio_map_kern()
1446 bio_put(bio); in bio_map_kern()
1455 bio->bi_end_io = bio_map_kern_endio; in bio_map_kern()
1456 return bio; in bio_map_kern()
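bio_map_kern() wraps an existing kernel buffer for zero-copy I/O: it walks the buffer a page at a time (honoring the initial offset within the first page) and adds each page with bio_add_pc_page() against the queue's limits, returning an ERR_PTR if anything is rejected. A hedged fragment; in-tree this is normally reached through blk_rq_map_kern():

    /*
     * Sketch: wrap a kmalloc'ed buffer without copying. The buffer must
     * be virt_to_page()-addressable, so vmalloc memory will not work.
     */
    struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
    if (IS_ERR(bio))
            return PTR_ERR(bio);
    /* the biovecs now point straight at buf's pages; the endio that
     * bio_map_kern() installed just drops the bio when it completes */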
1460 static void bio_copy_kern_endio(struct bio *bio) in bio_copy_kern_endio() argument
1462 bio_free_pages(bio); in bio_copy_kern_endio()
1463 bio_put(bio); in bio_copy_kern_endio()
1466 static void bio_copy_kern_endio_read(struct bio *bio) in bio_copy_kern_endio_read() argument
1468 char *p = bio->bi_private; in bio_copy_kern_endio_read()
1472 bio_for_each_segment_all(bvec, bio, i) { in bio_copy_kern_endio_read()
1477 bio_copy_kern_endio(bio); in bio_copy_kern_endio_read()
1491 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, in bio_copy_kern()
1497 struct bio *bio; in bio_copy_kern() local
1508 bio = bio_kmalloc(gfp_mask, nr_pages); in bio_copy_kern()
1509 if (!bio) in bio_copy_kern()
1526 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) in bio_copy_kern()
1534 bio->bi_end_io = bio_copy_kern_endio_read; in bio_copy_kern()
1535 bio->bi_private = data; in bio_copy_kern()
1537 bio->bi_end_io = bio_copy_kern_endio; in bio_copy_kern()
1538 bio->bi_rw |= REQ_WRITE; in bio_copy_kern()
1541 return bio; in bio_copy_kern()
1544 bio_free_pages(bio); in bio_copy_kern()
1545 bio_put(bio); in bio_copy_kern()
1579 void bio_set_pages_dirty(struct bio *bio) in bio_set_pages_dirty() argument
1584 bio_for_each_segment_all(bvec, bio, i) { in bio_set_pages_dirty()
1592 static void bio_release_pages(struct bio *bio) in bio_release_pages() argument
1597 bio_for_each_segment_all(bvec, bio, i) { in bio_release_pages()
1620 static struct bio *bio_dirty_list;
1628 struct bio *bio; in bio_dirty_fn() local
1631 bio = bio_dirty_list; in bio_dirty_fn()
1635 while (bio) { in bio_dirty_fn()
1636 struct bio *next = bio->bi_private; in bio_dirty_fn()
1638 bio_set_pages_dirty(bio); in bio_dirty_fn()
1639 bio_release_pages(bio); in bio_dirty_fn()
1640 bio_put(bio); in bio_dirty_fn()
1641 bio = next; in bio_dirty_fn()
1645 void bio_check_pages_dirty(struct bio *bio) in bio_check_pages_dirty() argument
1651 bio_for_each_segment_all(bvec, bio, i) { in bio_check_pages_dirty()
1666 bio->bi_private = bio_dirty_list; in bio_check_pages_dirty()
1667 bio_dirty_list = bio; in bio_check_pages_dirty()
1671 bio_put(bio); in bio_check_pages_dirty()
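The dirty-page helpers exist for direct reads into user memory, as in fs/direct-io.c: the submitter dirties the pages up front, and at completion bio_check_pages_dirty() releases the bio if everything is still dirty, but punts to bio_dirty_list and a workqueue when the VM cleaned a page mid-flight, because redirtying with set_page_dirty_lock() can sleep and completions may run in interrupt context. A sketch of the pairing:

    /* Sketch: direct read into user pages. */
    bio_set_pages_dirty(bio);           /* before submission */
    submit_bio(READ, bio);

    /* later, from the ->bi_end_io handler: */
    bio_check_pages_dirty(bio);         /* re-dirties and releases */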
1704 void bio_flush_dcache_pages(struct bio *bi) in bio_flush_dcache_pages()
1715 static inline bool bio_remaining_done(struct bio *bio) in bio_remaining_done() argument
1721 if (!bio_flagged(bio, BIO_CHAIN)) in bio_remaining_done()
1724 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); in bio_remaining_done()
1726 if (atomic_dec_and_test(&bio->__bi_remaining)) { in bio_remaining_done()
1727 bio_clear_flag(bio, BIO_CHAIN); in bio_remaining_done()
1743 void bio_endio(struct bio *bio) in bio_endio() argument
1745 while (bio) { in bio_endio()
1746 if (unlikely(!bio_remaining_done(bio))) in bio_endio()
1757 if (bio->bi_end_io == bio_chain_endio) { in bio_endio()
1758 struct bio *parent = bio->bi_private; in bio_endio()
1759 parent->bi_error = bio->bi_error; in bio_endio()
1760 bio_put(bio); in bio_endio()
1761 bio = parent; in bio_endio()
1763 if (bio->bi_end_io) in bio_endio()
1764 bio->bi_end_io(bio); in bio_endio()
1765 bio = NULL; in bio_endio()
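bio_endio() iterates rather than recurses: when it finds bi_end_io == bio_chain_endio it folds the child's bi_error into the parent, puts the child, and restarts the loop on the parent, so arbitrarily long chains complete in constant stack. A minimal sketch of an ordinary completion handler in this era's style, where the error lives in bi_error instead of being passed as an argument; struct my_io is hypothetical:

    /* Sketch: per-I/O completion callback. */
    static void my_endio(struct bio *bio)
    {
            struct my_io *io = bio->bi_private;  /* assumed per-I/O state */

            if (bio->bi_error)
                    io->error = bio->bi_error;
            complete(&io->done);
            bio_put(bio);
    }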
1785 struct bio *bio_split(struct bio *bio, int sectors, in bio_split() argument
1788 struct bio *split = NULL; in bio_split()
1791 BUG_ON(sectors >= bio_sectors(bio)); in bio_split()
1797 if (bio->bi_rw & REQ_DISCARD) in bio_split()
1798 split = bio_clone_bioset(bio, gfp, bs); in bio_split()
1800 split = bio_clone_fast(bio, gfp, bs); in bio_split()
1810 bio_advance(bio, split->bi_iter.bi_size); in bio_split()
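bio_split() clones the front @sectors of a bio and advances the original past them, so the two together still cover the source range; discards take the slower bio_clone_bioset() path because drivers may need a mutable biovec for the TRIM payload, while everything else gets the shared-vec fast clone. The caller is expected to chain and submit both halves; a hedged sketch of splitting down to a hardware limit:

    /*
     * Sketch: peel off chunks the device can accept. bio_split()
     * requires 0 < sectors < bio_sectors(bio), which the loop
     * condition guarantees.
     */
    while (bio_sectors(bio) > max_sectors) {
            struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

            bio_chain(split, bio);
            generic_make_request(split);
    }
    generic_make_request(bio);          /* final piece */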
1822 void bio_trim(struct bio *bio, int offset, int size) in bio_trim() argument
1829 if (offset == 0 && size == bio->bi_iter.bi_size) in bio_trim()
1832 bio_clear_flag(bio, BIO_SEG_VALID); in bio_trim()
1834 bio_advance(bio, offset << 9); in bio_trim()
1836 bio->bi_iter.bi_size = size; in bio_trim()
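bio_trim() narrows a cloned bio to a sector subrange: it advances past @offset sectors through bio_advance(), which keeps any integrity payload in step, clamps bi_size to @size sectors, and clears BIO_SEG_VALID so segment counts are recomputed. A tiny sketch; the numbers are arbitrary:

    /* Sketch: keep only sectors [8, 24) of a clone. Both arguments are
     * in 512-byte sectors; bio_trim() does the << 9 itself. */
    struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);
    bio_trim(clone, 8, 16);             /* skip 8 sectors, keep 16 */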
1960 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) in bio_associate_blkcg() argument
1962 if (unlikely(bio->bi_css)) in bio_associate_blkcg()
1965 bio->bi_css = blkcg_css; in bio_associate_blkcg()
1983 int bio_associate_current(struct bio *bio) in bio_associate_current() argument
1987 if (bio->bi_css) in bio_associate_current()
1995 bio->bi_ioc = ioc; in bio_associate_current()
1996 bio->bi_css = task_get_css(current, io_cgrp_id); in bio_associate_current()
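bio_associate_current() pins the submitting task's io_context and blkcg css to the bio so that throttling and other blkcg policies charge the right owner even when the bio is actually issued later from a worker thread; it bails out if a css is already attached, and bio_disassociate_task() drops both references when the bio is freed. A one-line sketch of the intended call site; the deferred-submission machinery is assumed:

    /* Sketch: tag the bio with the current task before deferring it. */
    bio_associate_current(bio);         /* grabs io_context + css refs */
    queue_work(my_wq, &my_submit_work); /* assumed: worker submits bio */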
2005 void bio_disassociate_task(struct bio *bio) in bio_disassociate_task() argument
2007 if (bio->bi_ioc) { in bio_disassociate_task()
2008 put_io_context(bio->bi_ioc); in bio_disassociate_task()
2009 bio->bi_ioc = NULL; in bio_disassociate_task()
2011 if (bio->bi_css) { in bio_disassociate_task()
2012 css_put(bio->bi_css); in bio_disassociate_task()
2013 bio->bi_css = NULL; in bio_disassociate_task()