
Searched refs:bio (Results 1 – 200 of 261) sorted by relevance

/linux-4.4.14/include/linux/
bio.h
51 #define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) argument
52 #define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) argument
54 #define bio_set_prio(bio, prio) do { \ argument
56 (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
57 (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
83 #define bio_iter_iovec(bio, iter) \ argument
84 bvec_iter_bvec((bio)->bi_io_vec, (iter))
86 #define bio_iter_page(bio, iter) \ argument
87 bvec_iter_page((bio)->bi_io_vec, (iter))
88 #define bio_iter_len(bio, iter) \ argument
[all …]
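
The bio_prio()/bio_set_prio() macros above pack an ioprio value into the high bits of bi_rw. A minimal sketch of tagging a bio before submission, assuming the 4.4-era fields shown here (the helper name is hypothetical):

    #include <linux/bio.h>
    #include <linux/ioprio.h>

    /* hypothetical helper: mark a bio best-effort, priority level 4 */
    static void tag_bio_best_effort(struct bio *bio)
    {
        /* stores the ioprio value above BIO_PRIO_SHIFT in bi_rw */
        bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));

        /* bio_prio() now recovers the value and it validates */
        WARN_ON(!bio_prio_valid(bio));
    }
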
blkdev.h
108 struct bio *bio; member
109 struct bio *biotail;
212 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
672 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) in blk_write_same_mergeable()
713 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
719 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) in blk_queue_bounce() argument
735 struct bio *bio; member
742 if ((rq->bio)) \
743 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
746 __rq_for_each_bio(_iter.bio, _rq) \
[all …]
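
The __rq_for_each_bio() helper above walks the singly linked bi_next chain hanging off rq->bio. A sketch of counting the bios in a request with it, assuming the 4.4 definitions shown (the function itself is illustrative only):

    #include <linux/blkdev.h>

    static unsigned int count_request_bios(struct request *rq)
    {
        struct bio *bio;
        unsigned int n = 0;

        /* the macro's own if ((rq->bio)) guard handles empty requests */
        __rq_for_each_bio(bio, rq)
            n++;

        return n;
    }
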
blk-cgroup.h
231 static inline struct blkcg *bio_blkcg(struct bio *bio) in bio_blkcg() argument
233 if (bio && bio->bi_css) in bio_blkcg()
234 return css_to_blkcg(bio->bi_css); in bio_blkcg()
426 struct bio *bio) in blk_get_rl() argument
433 blkcg = bio_blkcg(bio); in blk_get_rl()
687 struct bio *bio);
690 struct bio *bio) { return false; } in blk_throtl_bio() argument
694 struct bio *bio) in blkcg_bio_issue_check() argument
701 blkcg = bio_blkcg(bio); in blkcg_bio_issue_check()
712 throtl = blk_throtl_bio(q, blkg, bio); in blkcg_bio_issue_check()
[all …]
device-mapper.h
48 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
66 struct bio *bio, int error);
200 typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
296 struct bio clone;
299 static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) in dm_per_bio_data() argument
301 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; in dm_per_bio_data()
304 static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) in dm_bio_from_per_bio_data()
306 return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); in dm_bio_from_per_bio_data()
309 static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio) in dm_bio_get_target_bio_nr() argument
311 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
[all …]
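
dm_per_bio_data() and dm_bio_from_per_bio_data() above are exact inverses: both assume the per-bio data sits immediately in front of the struct dm_target_io whose clone member is the bio handed to the target. A sketch of the round trip under that assumption (struct my_ctx and my_map() are hypothetical; the target must have set ti->per_bio_data_size = sizeof(struct my_ctx)):

    #include <linux/device-mapper.h>
    #include <linux/jiffies.h>

    struct my_ctx {
        unsigned long start_jiffies;    /* example per-bio bookkeeping */
    };

    static int my_map(struct dm_target *ti, struct bio *bio)
    {
        struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));

        ctx->start_jiffies = jiffies;

        /* the reverse mapping lands back on the same bio */
        BUG_ON(dm_bio_from_per_bio_data(ctx, sizeof(struct my_ctx)) != bio);

        return DM_MAPIO_REMAPPED;
    }
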
dm-region-hash.h
50 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
79 void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
81 void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
elevator.h
13 struct bio *);
19 typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
22 struct request *, struct bio *);
34 struct bio *, gfp_t);
126 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
131 struct bio *);
140 struct bio *bio, gfp_t gfp_mask);
160 extern bool elv_rq_merge_ok(struct request *, struct bio *);
blk_types.h
11 struct bio;
17 typedef void (bio_end_io_t) (struct bio *);
18 typedef void (bio_destructor_t) (struct bio *);
46 struct bio { struct
47 struct bio *bi_next; /* request queue link */ argument
110 #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
138 #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) argument
pktcdvd.h
117 struct bio *w_bio; /* The bio we will send to the real CD */
132 struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
145 struct bio *bio; member
150 struct bio *bio; /* Original read request bio */ member
writeback.h
258 static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) in wbc_init_bio() argument
267 bio_associate_blkcg(bio, wbc->wb->blkcg_css); in wbc_init_bio()
296 static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio) in wbc_init_bio() argument
dm-io.h
44 struct bio *bio; member
swap.h
18 struct bio;
384 extern void end_swap_bio_write(struct bio *bio);
lightnvm.h
155 struct bio *bio; member
348 typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
/linux-4.4.14/block/
bio.c
73 unsigned int sz = sizeof(struct bio) + extra_size; in bio_find_or_create_slab()
237 static void __bio_free(struct bio *bio) in __bio_free() argument
239 bio_disassociate_task(bio); in __bio_free()
241 if (bio_integrity(bio)) in __bio_free()
242 bio_integrity_free(bio); in __bio_free()
245 static void bio_free(struct bio *bio) in bio_free() argument
247 struct bio_set *bs = bio->bi_pool; in bio_free()
250 __bio_free(bio); in bio_free()
253 if (bio_flagged(bio, BIO_OWNS_VEC)) in bio_free()
254 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); in bio_free()
[all …]
blk-merge.c
12 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split()
13 struct bio *bio, in blk_bio_discard_split() argument
35 if (bio_sectors(bio) <= max_discard_sectors) in blk_bio_discard_split()
46 tmp = bio->bi_iter.bi_sector + split_sectors - alignment; in blk_bio_discard_split()
52 return bio_split(bio, split_sectors, GFP_NOIO, bs); in blk_bio_discard_split()
55 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split()
56 struct bio *bio, in blk_bio_write_same_split() argument
65 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
68 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
72 struct bio *bio) in get_max_io_size() argument
[all …]
blk-map.c
31 struct bio *bio) in blk_rq_append_bio() argument
33 if (!rq->bio) in blk_rq_append_bio()
34 blk_rq_bio_prep(q, rq, bio); in blk_rq_append_bio()
35 else if (!ll_back_merge_fn(q, rq, bio)) in blk_rq_append_bio()
38 rq->biotail->bi_next = bio; in blk_rq_append_bio()
39 rq->biotail = bio; in blk_rq_append_bio()
41 rq->__data_len += bio->bi_iter.bi_size; in blk_rq_append_bio()
46 static int __blk_rq_unmap_user(struct bio *bio) in __blk_rq_unmap_user() argument
50 if (bio) { in __blk_rq_unmap_user()
51 if (bio_flagged(bio, BIO_USER_MAPPED)) in __blk_rq_unmap_user()
[all …]
bounce.c
102 static void copy_to_high_bio_irq(struct bio *to, struct bio *from) in copy_to_high_bio_irq()
126 static void bounce_end_io(struct bio *bio, mempool_t *pool) in bounce_end_io() argument
128 struct bio *bio_orig = bio->bi_private; in bounce_end_io()
136 bio_for_each_segment_all(bvec, bio, i) { in bounce_end_io()
146 bio_orig->bi_error = bio->bi_error; in bounce_end_io()
148 bio_put(bio); in bounce_end_io()
151 static void bounce_end_io_write(struct bio *bio) in bounce_end_io_write() argument
153 bounce_end_io(bio, page_pool); in bounce_end_io_write()
156 static void bounce_end_io_write_isa(struct bio *bio) in bounce_end_io_write_isa() argument
159 bounce_end_io(bio, isa_page_pool); in bounce_end_io_write_isa()
[all …]
bio-integrity.c
50 struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, in bio_integrity_alloc() argument
55 struct bio_set *bs = bio->bi_pool; in bio_integrity_alloc()
85 bip->bip_bio = bio; in bio_integrity_alloc()
86 bio->bi_integrity = bip; in bio_integrity_alloc()
87 bio->bi_rw |= REQ_INTEGRITY; in bio_integrity_alloc()
103 void bio_integrity_free(struct bio *bio) in bio_integrity_free() argument
105 struct bio_integrity_payload *bip = bio_integrity(bio); in bio_integrity_free()
106 struct bio_set *bs = bio->bi_pool; in bio_integrity_free()
122 bio->bi_integrity = NULL; in bio_integrity_free()
135 int bio_integrity_add_page(struct bio *bio, struct page *page, in bio_integrity_add_page() argument
[all …]
blk-lib.c
18 static void bio_batch_end_io(struct bio *bio) in bio_batch_end_io() argument
20 struct bio_batch *bb = bio->bi_private; in bio_batch_end_io()
22 if (bio->bi_error && bio->bi_error != -EOPNOTSUPP) in bio_batch_end_io()
23 bb->error = bio->bi_error; in bio_batch_end_io()
26 bio_put(bio); in bio_batch_end_io()
49 struct bio *bio; in blkdev_issue_discard() local
78 bio = bio_alloc(gfp_mask, 1); in blkdev_issue_discard()
79 if (!bio) { in blkdev_issue_discard()
101 bio->bi_iter.bi_sector = sector; in blkdev_issue_discard()
102 bio->bi_end_io = bio_batch_end_io; in blkdev_issue_discard()
[all …]
blk-core.c
142 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
146 bio->bi_error = error; in req_bio_endio()
149 bio_set_flag(bio, BIO_QUIET); in req_bio_endio()
151 bio_advance(bio, nbytes); in req_bio_endio()
154 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) in req_bio_endio()
155 bio_endio(bio); in req_bio_endio()
170 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
828 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
1015 static bool blk_rq_should_init_elevator(struct bio *bio) in blk_rq_should_init_elevator() argument
1017 if (!bio) in blk_rq_should_init_elevator()
[all …]
blk-throttle.c
250 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, in throtl_qnode_add_bio() argument
253 bio_list_add(&qn->bios, bio); in throtl_qnode_add_bio()
264 static struct bio *throtl_peek_queued(struct list_head *queued) in throtl_peek_queued()
267 struct bio *bio; in throtl_peek_queued() local
272 bio = bio_list_peek(&qn->bios); in throtl_peek_queued()
273 WARN_ON_ONCE(!bio); in throtl_peek_queued()
274 return bio; in throtl_peek_queued()
291 static struct bio *throtl_pop_queued(struct list_head *queued, in throtl_pop_queued()
295 struct bio *bio; in throtl_pop_queued() local
300 bio = bio_list_pop(&qn->bios); in throtl_pop_queued()
[all …]
blk.h
64 void init_request_from_bio(struct request *req, struct bio *bio);
66 struct bio *bio);
68 struct bio *bio);
103 struct bio *bio);
105 struct bio *bio);
106 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
210 struct bio *bio);
212 struct bio *bio);
219 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
220 int blk_try_merge(struct request *rq, struct bio *bio);
blk-integrity.c
41 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) in blk_rq_count_integrity_sg() argument
49 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_count_integrity_sg()
86 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, in blk_rq_map_integrity_sg() argument
95 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_map_integrity_sg()
195 if (bio_integrity(req->bio)->bip_flags != in blk_integrity_merge_rq()
196 bio_integrity(next->bio)->bip_flags) in blk_integrity_merge_rq()
203 if (integrity_req_gap_back_merge(req, next->bio)) in blk_integrity_merge_rq()
211 struct bio *bio) in blk_integrity_merge_bio() argument
214 struct bio *next = bio->bi_next; in blk_integrity_merge_bio()
216 if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL) in blk_integrity_merge_bio()
[all …]
blk-mq.c
651 struct blk_mq_ctx *ctx, struct bio *bio) in blk_mq_attempt_merge() argument
662 if (!blk_rq_merge_ok(rq, bio)) in blk_mq_attempt_merge()
665 el_ret = blk_try_merge(rq, bio); in blk_mq_attempt_merge()
667 if (bio_attempt_back_merge(q, rq, bio)) { in blk_mq_attempt_merge()
673 if (bio_attempt_front_merge(q, rq, bio)) { in blk_mq_attempt_merge()
1115 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) in blk_mq_bio_to_request() argument
1117 init_request_from_bio(rq, bio); in blk_mq_bio_to_request()
1131 struct request *rq, struct bio *bio) in blk_mq_merge_queue_io() argument
1133 if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) { in blk_mq_merge_queue_io()
1134 blk_mq_bio_to_request(rq, bio); in blk_mq_merge_queue_io()
[all …]
elevator.c
56 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) in elv_iosched_allow_merge() argument
62 return e->type->ops.elevator_allow_merge_fn(q, rq, bio); in elv_iosched_allow_merge()
70 bool elv_rq_merge_ok(struct request *rq, struct bio *bio) in elv_rq_merge_ok() argument
72 if (!blk_rq_merge_ok(rq, bio)) in elv_rq_merge_ok()
75 if (!elv_iosched_allow_merge(rq, bio)) in elv_rq_merge_ok()
411 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) in elv_merge() argument
423 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) in elv_merge()
429 if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) { in elv_merge()
430 ret = blk_try_merge(q->last_merge, bio); in elv_merge()
443 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
[all …]
blk-flush.c
126 rq->bio = rq->biotail; in blk_flush_restore_request()
413 BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ in blk_insert_flush()
466 struct bio *bio; in blkdev_issue_flush() local
485 bio = bio_alloc(gfp_mask, 0); in blkdev_issue_flush()
486 bio->bi_bdev = bdev; in blkdev_issue_flush()
488 ret = submit_bio_wait(WRITE_FLUSH, bio); in blkdev_issue_flush()
496 *error_sector = bio->bi_iter.bi_sector; in blkdev_issue_flush()
498 bio_put(bio); in blkdev_issue_flush()
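
blkdev_issue_flush() above shows the zero-length flush pattern: an empty bio pointed at the device and submitted synchronously with WRITE_FLUSH. The same pattern as a self-contained sketch, assuming the 4.4-era submit_bio_wait(rw, bio) signature and an already-opened bdev:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int flush_device_cache(struct block_device *bdev)
    {
        struct bio *bio;
        int ret;

        bio = bio_alloc(GFP_KERNEL, 0);    /* zero data pages: pure flush */
        if (!bio)
            return -ENOMEM;
        bio->bi_bdev = bdev;

        ret = submit_bio_wait(WRITE_FLUSH, bio);
        bio_put(bio);
        return ret;
    }
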
bsg.c
84 struct bio *bio; member
85 struct bio *bidi_bio;
292 blk_rq_unmap_user(next_rq->bio); in bsg_map_hdr()
309 bd->name, rq, bc, bc->bio, uptodate); in bsg_rq_end_io()
334 bc->bio = rq->bio; in bsg_add_command()
336 bc->bidi_bio = rq->next_rq->bio; in bsg_add_command()
394 struct bio *bio, struct bio *bidi_bio) in blk_complete_sgv4_hdr_rq() argument
398 dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); in blk_complete_sgv4_hdr_rq()
441 blk_rq_unmap_user(bio); in blk_complete_sgv4_hdr_rq()
501 tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, in bsg_complete_all_commands()
[all …]
deadline-iosched.c
125 deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) in deadline_merge() argument
135 sector_t sector = bio_end_sector(bio); in deadline_merge()
137 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); in deadline_merge()
141 if (elv_rq_merge_ok(__rq, bio)) { in deadline_merge()
Makefile
5 obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
24 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
scsi_ioctl.c
252 struct bio *bio) in blk_complete_sghdr_rq() argument
279 r = blk_rq_unmap_user(bio); in blk_complete_sghdr_rq()
295 struct bio *bio; in sg_io() local
356 bio = rq->bio; in sg_io()
372 ret = blk_complete_sghdr_rq(rq, hdr, bio); in sg_io()
cfq-iosched.c
863 struct cfq_io_cq *cic, struct bio *bio);
899 static inline bool cfq_bio_sync(struct bio *bio) in cfq_bio_sync() argument
901 return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); in cfq_bio_sync()
2459 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) in cfq_find_rq_fmerge() argument
2469 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); in cfq_find_rq_fmerge()
2471 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio)); in cfq_find_rq_fmerge()
2516 struct bio *bio) in cfq_merge() argument
2521 __rq = cfq_find_rq_fmerge(cfqd, bio); in cfq_merge()
2522 if (__rq && elv_rq_merge_ok(__rq, bio)) { in cfq_merge()
2541 struct bio *bio) in cfq_bio_merged() argument
[all …]
bsg-lib.c
132 if (req->bio) { in bsg_create_job()
137 if (rsp && rsp->bio) { in bsg_create_job()
/linux-4.4.14/drivers/md/bcache/
request.c
29 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio) in cache_mode() argument
34 static bool verify(struct cached_dev *dc, struct bio *bio) in verify() argument
39 static void bio_csum(struct bio *bio, struct bkey *k) in bio_csum() argument
45 bio_for_each_segment(bv, bio, iter) { in bio_csum()
121 struct bio *bio = op->bio; in bch_data_invalidate() local
124 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate()
126 while (bio_sectors(bio)) { in bch_data_invalidate()
127 unsigned sectors = min(bio_sectors(bio), in bch_data_invalidate()
133 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate()
134 bio->bi_iter.bi_size -= sectors << 9; in bch_data_invalidate()
[all …]
io.c
16 void bch_bbio_free(struct bio *bio, struct cache_set *c) in bch_bbio_free() argument
18 struct bbio *b = container_of(bio, struct bbio, bio); in bch_bbio_free()
22 struct bio *bch_bbio_alloc(struct cache_set *c) in bch_bbio_alloc()
25 struct bio *bio = &b->bio; in bch_bbio_alloc() local
27 bio_init(bio); in bch_bbio_alloc()
28 bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET; in bch_bbio_alloc()
29 bio->bi_max_vecs = bucket_pages(c); in bch_bbio_alloc()
30 bio->bi_io_vec = bio->bi_inline_vecs; in bch_bbio_alloc()
32 return bio; in bch_bbio_alloc()
35 void __bch_submit_bbio(struct bio *bio, struct cache_set *c) in __bch_submit_bbio() argument
[all …]
movinggc.c
18 struct bbio bio; member
46 struct bio *bio = &io->bio.bio; in write_moving_finish() local
50 bio_for_each_segment_all(bv, bio, i) in write_moving_finish()
63 static void read_moving_endio(struct bio *bio) in read_moving_endio() argument
65 struct bbio *b = container_of(bio, struct bbio, bio); in read_moving_endio()
66 struct moving_io *io = container_of(bio->bi_private, in read_moving_endio()
69 if (bio->bi_error) in read_moving_endio()
70 io->op.error = bio->bi_error; in read_moving_endio()
76 bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move"); in read_moving_endio()
81 struct bio *bio = &io->bio.bio; in moving_init() local
[all …]
writeback.c
102 struct bio bio; member
108 struct bio *bio = &io->bio; in dirty_init() local
110 bio_init(bio); in dirty_init()
112 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); in dirty_init()
114 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; in dirty_init()
115 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); in dirty_init()
116 bio->bi_private = w; in dirty_init()
117 bio->bi_io_vec = bio->bi_inline_vecs; in dirty_init()
118 bch_bio_map(bio, NULL); in dirty_init()
130 struct keybuf_key *w = io->bio.bi_private; in write_dirty_finish()
[all …]
journal.c
27 static void journal_read_endio(struct bio *bio) in journal_read_endio() argument
29 struct closure *cl = bio->bi_private; in journal_read_endio()
37 struct bio *bio = &ja->bio; in journal_read_bucket() local
54 bio_reset(bio); in journal_read_bucket()
55 bio->bi_iter.bi_sector = bucket + offset; in journal_read_bucket()
56 bio->bi_bdev = ca->bdev; in journal_read_bucket()
57 bio->bi_rw = READ; in journal_read_bucket()
58 bio->bi_iter.bi_size = len << 9; in journal_read_bucket()
60 bio->bi_end_io = journal_read_endio; in journal_read_bucket()
61 bio->bi_private = &cl; in journal_read_bucket()
[all …]
debug.c
34 struct bio *bio; in bch_btree_verify() local
51 bio = bch_bbio_alloc(b->c); in bch_btree_verify()
52 bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; in bch_btree_verify()
53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in bch_btree_verify()
54 bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; in bch_btree_verify()
55 bch_bio_map(bio, sorted); in bch_btree_verify()
57 submit_bio_wait(REQ_META|READ_SYNC, bio); in bch_btree_verify()
58 bch_bbio_free(bio, b->c); in bch_btree_verify()
105 void bch_data_verify(struct cached_dev *dc, struct bio *bio) in bch_data_verify() argument
108 struct bio *check; in bch_data_verify()
[all …]
debug.h
4 struct bio;
11 void bch_data_verify(struct cached_dev *, struct bio *);
20 static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} in bch_data_verify() argument
super.c
201 static void write_bdev_super_endio(struct bio *bio) in write_bdev_super_endio() argument
203 struct cached_dev *dc = bio->bi_private; in write_bdev_super_endio()
209 static void __write_super(struct cache_sb *sb, struct bio *bio) in __write_super() argument
211 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); in __write_super()
214 bio->bi_iter.bi_sector = SB_SECTOR; in __write_super()
215 bio->bi_rw = REQ_SYNC|REQ_META; in __write_super()
216 bio->bi_iter.bi_size = SB_SIZE; in __write_super()
217 bch_bio_map(bio, NULL); in __write_super()
241 submit_bio(REQ_WRITE, bio); in __write_super()
254 struct bio *bio = &dc->sb_bio; in bch_write_bdev_super() local
[all …]
writeback.h
42 static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, in should_writeback() argument
53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, in should_writeback()
54 bio_sectors(bio))) in should_writeback()
60 return bio->bi_rw & REQ_SYNC || in should_writeback()
util.c
225 void bch_bio_map(struct bio *bio, void *base) in bch_bio_map() argument
227 size_t size = bio->bi_iter.bi_size; in bch_bio_map()
228 struct bio_vec *bv = bio->bi_io_vec; in bch_bio_map()
230 BUG_ON(!bio->bi_iter.bi_size); in bch_bio_map()
231 BUG_ON(bio->bi_vcnt); in bch_bio_map()
236 for (; size; bio->bi_vcnt++, bv++) { in bch_bio_map()
bcache.h
276 struct bio *, unsigned);
296 struct bio sb_bio;
383 struct bio sb_bio;
677 struct bio bio; member
853 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
855 void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
856 void bch_bbio_free(struct bio *, struct cache_set *);
857 struct bio *bch_bbio_alloc(struct cache_set *);
859 void __bch_submit_bbio(struct bio *, struct cache_set *);
860 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
journal.h
148 struct bio discard_bio;
152 struct bio bio; member
btree.c
281 static void btree_node_read_endio(struct bio *bio) in btree_node_read_endio() argument
283 struct closure *cl = bio->bi_private; in btree_node_read_endio()
291 struct bio *bio; in bch_btree_node_read() local
297 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
298 bio->bi_rw = REQ_META|READ_SYNC; in bch_btree_node_read()
299 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
300 bio->bi_end_io = btree_node_read_endio; in bch_btree_node_read()
301 bio->bi_private = &cl; in bch_btree_node_read()
303 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
305 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
[all …]
request.h
7 struct bio *bio; member
btree.h
146 struct bio *bio; member
util.h
567 void bch_bio_map(struct bio *bio, void *base);
574 #define closure_bio_submit(bio, cl) \ argument
577 generic_make_request(bio); \
/linux-4.4.14/fs/logfs/
dev_bdev.c
19 struct bio bio; in sync_request() local
22 bio_init(&bio); in sync_request()
23 bio.bi_max_vecs = 1; in sync_request()
24 bio.bi_io_vec = &bio_vec; in sync_request()
28 bio.bi_vcnt = 1; in sync_request()
29 bio.bi_bdev = bdev; in sync_request()
30 bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); in sync_request()
31 bio.bi_iter.bi_size = PAGE_SIZE; in sync_request()
33 return submit_bio_wait(rw, &bio); in sync_request()
56 static void writeseg_end_io(struct bio *bio) in writeseg_end_io() argument
[all …]
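
The logfs sync_request() fragment above builds a fully on-stack bio for one synchronous page of I/O, so no bio_put() is needed afterwards. A self-contained sketch of the same pattern with the sector passed in directly (4.4-era bi_iter fields; the function name is illustrative):

    #include <linux/bio.h>

    static int sync_page_io_onstack(struct block_device *bdev, sector_t sector,
                                    struct page *page, int rw)
    {
        struct bio bio;
        struct bio_vec vec;

        bio_init(&bio);
        bio.bi_max_vecs = 1;
        bio.bi_io_vec = &vec;
        vec.bv_page = page;
        vec.bv_len = PAGE_SIZE;
        vec.bv_offset = 0;
        bio.bi_vcnt = 1;
        bio.bi_bdev = bdev;
        bio.bi_iter.bi_sector = sector;
        bio.bi_iter.bi_size = PAGE_SIZE;

        return submit_bio_wait(rw, &bio);    /* stack bio: no bio_put() */
    }
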
/linux-4.4.14/fs/
mpage.c
45 static void mpage_end_io(struct bio *bio) in mpage_end_io() argument
50 bio_for_each_segment_all(bv, bio, i) { in mpage_end_io()
52 page_endio(page, bio_data_dir(bio), bio->bi_error); in mpage_end_io()
55 bio_put(bio); in mpage_end_io()
58 static struct bio *mpage_bio_submit(int rw, struct bio *bio) in mpage_bio_submit() argument
60 bio->bi_end_io = mpage_end_io; in mpage_bio_submit()
61 guard_bio_eod(rw, bio); in mpage_bio_submit()
62 submit_bio(rw, bio); in mpage_bio_submit()
66 static struct bio *
71 struct bio *bio; in mpage_alloc() local
[all …]
direct-io.c
62 struct bio *bio; /* bio under assembly */ member
128 struct bio *bio_list; /* singly linked via bi_private */
286 static int dio_bio_complete(struct dio *dio, struct bio *bio);
291 static void dio_bio_end_aio(struct bio *bio) in dio_bio_end_aio() argument
293 struct dio *dio = bio->bi_private; in dio_bio_end_aio()
298 dio_bio_complete(dio, bio); in dio_bio_end_aio()
324 static void dio_bio_end_io(struct bio *bio) in dio_bio_end_io() argument
326 struct dio *dio = bio->bi_private; in dio_bio_end_io()
330 bio->bi_private = dio->bio_list; in dio_bio_end_io()
331 dio->bio_list = bio; in dio_bio_end_io()
[all …]
buffer.c
2940 static void end_bio_bh_io_sync(struct bio *bio) in end_bio_bh_io_sync() argument
2942 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync()
2944 if (unlikely(bio_flagged(bio, BIO_QUIET))) in end_bio_bh_io_sync()
2947 bh->b_end_io(bh, !bio->bi_error); in end_bio_bh_io_sync()
2948 bio_put(bio); in end_bio_bh_io_sync()
2963 void guard_bio_eod(int rw, struct bio *bio) in guard_bio_eod() argument
2966 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; in guard_bio_eod()
2969 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; in guard_bio_eod()
2978 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
2981 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod()
[all …]
internal.h
41 extern void guard_bio_eod(int rw, struct bio *bio);
/linux-4.4.14/fs/ext4/
readpage.c
58 struct bio *bio = ctx->r.bio; in completion_pages() local
62 bio_for_each_segment_all(bv, bio, i) { in completion_pages()
74 bio_put(bio); in completion_pages()
80 static inline bool ext4_bio_encrypted(struct bio *bio) in ext4_bio_encrypted() argument
83 return unlikely(bio->bi_private != NULL); in ext4_bio_encrypted()
101 static void mpage_end_io(struct bio *bio) in mpage_end_io() argument
106 if (ext4_bio_encrypted(bio)) { in mpage_end_io()
107 struct ext4_crypto_ctx *ctx = bio->bi_private; in mpage_end_io()
109 if (bio->bi_error) { in mpage_end_io()
113 ctx->r.bio = bio; in mpage_end_io()
[all …]
page-io.c
61 static void ext4_finish_bio(struct bio *bio) in ext4_finish_bio() argument
66 bio_for_each_segment_all(bvec, bio, i) { in ext4_finish_bio()
90 if (bio->bi_error) { in ext4_finish_bio()
109 if (bio->bi_error) in ext4_finish_bio()
126 struct bio *bio, *next_bio; in ext4_release_io_end() local
135 for (bio = io_end->bio; bio; bio = next_bio) { in ext4_release_io_end()
136 next_bio = bio->bi_private; in ext4_release_io_end()
137 ext4_finish_bio(bio); in ext4_release_io_end()
138 bio_put(bio); in ext4_release_io_end()
312 static void ext4_end_bio(struct bio *bio) in ext4_end_bio() argument
[all …]
crypto.c
391 struct bio *bio; in ext4_encrypted_zeroout() local
421 bio = bio_alloc(GFP_KERNEL, 1); in ext4_encrypted_zeroout()
422 if (!bio) { in ext4_encrypted_zeroout()
426 bio->bi_bdev = inode->i_sb->s_bdev; in ext4_encrypted_zeroout()
427 bio->bi_iter.bi_sector = in ext4_encrypted_zeroout()
429 ret = bio_add_page(bio, ciphertext_page, in ext4_encrypted_zeroout()
436 bio_put(bio); in ext4_encrypted_zeroout()
440 err = submit_bio_wait(WRITE, bio); in ext4_encrypted_zeroout()
441 if ((err == 0) && bio->bi_error) in ext4_encrypted_zeroout()
443 bio_put(bio); in ext4_encrypted_zeroout()
ext4_crypto.h
95 struct bio *bio; member
/linux-4.4.14/include/trace/events/
block.h
258 TP_PROTO(struct request_queue *q, struct bio *bio),
260 TP_ARGS(q, bio),
271 __entry->dev = bio->bi_bdev ?
272 bio->bi_bdev->bd_dev : 0;
273 __entry->sector = bio->bi_iter.bi_sector;
274 __entry->nr_sector = bio_sectors(bio);
275 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
296 TP_PROTO(struct request_queue *q, struct bio *bio, int error),
298 TP_ARGS(q, bio, error),
309 __entry->dev = bio->bi_bdev->bd_dev;
[all …]
bcache.h
10 TP_PROTO(struct bcache_device *d, struct bio *bio),
11 TP_ARGS(d, bio),
24 __entry->dev = bio->bi_bdev->bd_dev;
27 __entry->sector = bio->bi_iter.bi_sector;
28 __entry->orig_sector = bio->bi_iter.bi_sector - 16;
29 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
30 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
80 TP_PROTO(struct bcache_device *d, struct bio *bio),
81 TP_ARGS(d, bio)
85 TP_PROTO(struct bcache_device *d, struct bio *bio),
[all …]
f2fs.h
774 struct bio *bio),
776 TP_ARGS(sb, fio, bio),
790 __entry->sector = bio->bi_iter.bi_sector;
791 __entry->size = bio->bi_iter.bi_size;
805 struct bio *bio),
807 TP_ARGS(sb, fio, bio),
809 TP_CONDITION(bio)
815 struct bio *bio),
817 TP_ARGS(sb, fio, bio),
819 TP_CONDITION(bio)
/linux-4.4.14/drivers/md/
dm-raid1.c
121 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) in queue_bio() argument
130 bio_list_add(bl, bio); in queue_bio()
140 struct bio *bio; in dispatch_bios() local
142 while ((bio = bio_list_pop(bio_list))) in dispatch_bios()
143 queue_bio(ms, bio, WRITE); in dispatch_bios()
163 static struct mirror *bio_get_m(struct bio *bio) in bio_get_m() argument
165 return (struct mirror *) bio->bi_next; in bio_get_m()
168 static void bio_set_m(struct bio *bio, struct mirror *m) in bio_set_m() argument
170 bio->bi_next = (struct bio *) m; in bio_set_m()
443 static int mirror_available(struct mirror_set *ms, struct bio *bio) in mirror_available() argument
[all …]
dm-thin.c
217 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
338 struct bio *parent_bio) in __blkdev_issue_discard_async()
342 struct bio *bio; in __blkdev_issue_discard_async() local
359 bio = bio_alloc(gfp_mask, 1); in __blkdev_issue_discard_async()
360 if (!bio) in __blkdev_issue_discard_async()
363 bio_chain(bio, parent_bio); in __blkdev_issue_discard_async()
365 bio->bi_iter.bi_sector = sector; in __blkdev_issue_discard_async()
366 bio->bi_bdev = bdev; in __blkdev_issue_discard_async()
367 bio->bi_iter.bi_size = nr_sects << 9; in __blkdev_issue_discard_async()
369 submit_bio(type, bio); in __blkdev_issue_discard_async()
[all …]
dm-log-writes.c
149 static void log_end_io(struct bio *bio) in log_end_io() argument
151 struct log_writes_c *lc = bio->bi_private; in log_end_io()
155 if (bio->bi_error) { in log_end_io()
158 DMERR("Error writing log block, error=%d", bio->bi_error); in log_end_io()
164 bio_for_each_segment_all(bvec, bio, i) in log_end_io()
168 bio_put(bio); in log_end_io()
193 struct bio *bio; in write_metadata() local
198 bio = bio_alloc(GFP_KERNEL, 1); in write_metadata()
199 if (!bio) { in write_metadata()
203 bio->bi_iter.bi_size = 0; in write_metadata()
[all …]
dm-bio-record.h
26 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) in dm_bio_record() argument
28 bd->bi_bdev = bio->bi_bdev; in dm_bio_record()
29 bd->bi_flags = bio->bi_flags; in dm_bio_record()
30 bd->bi_iter = bio->bi_iter; in dm_bio_record()
33 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) in dm_bio_restore() argument
35 bio->bi_bdev = bd->bi_bdev; in dm_bio_restore()
36 bio->bi_flags = bd->bi_flags; in dm_bio_restore()
37 bio->bi_iter = bd->bi_iter; in dm_bio_restore()
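
dm_bio_record()/dm_bio_restore() above snapshot exactly the routing fields a target rewrites (bi_bdev, bi_flags, bi_iter), so a failed remap can be undone before retrying. A sketch of that round trip; the alternate-device policy is hypothetical:

    #include "dm-bio-record.h"

    static void remap_with_undo(struct bio *bio, struct block_device *alt)
    {
        struct dm_bio_details bd;

        dm_bio_record(&bd, bio);    /* snapshot bi_bdev/bi_flags/bi_iter */

        bio->bi_bdev = alt;         /* tentative remap to another leg */

        /* ... and if that leg is rejected as well: */
        dm_bio_restore(&bd, bio);   /* original routing is back intact */
    }
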
raid1.c
54 #define IO_BLOCKED ((struct bio *)1)
59 #define IO_MADE_GOOD ((struct bio *)2)
61 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) argument
101 struct bio *bio; in r1buf_pool_alloc() local
113 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r1buf_pool_alloc()
114 if (!bio) in r1buf_pool_alloc()
116 r1_bio->bios[j] = bio; in r1buf_pool_alloc()
129 bio = r1_bio->bios[j]; in r1buf_pool_alloc()
130 bio->bi_vcnt = RESYNC_PAGES; in r1buf_pool_alloc()
132 if (bio_alloc_pages(bio, gfp_flags)) in r1buf_pool_alloc()
[all …]
dm-cache-target.c
124 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, in dm_hook_bio() argument
127 h->bi_end_io = bio->bi_end_io; in dm_hook_bio()
128 h->bi_private = bio->bi_private; in dm_hook_bio()
130 bio->bi_end_io = bi_end_io; in dm_hook_bio()
131 bio->bi_private = bi_private; in dm_hook_bio()
134 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) in dm_unhook_bio() argument
136 bio->bi_end_io = h->bi_end_io; in dm_unhook_bio()
137 bio->bi_private = h->bi_private; in dm_unhook_bio()
529 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, in bio_detain_range() argument
537 r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result); in bio_detain_range()
[all …]
dm-flakey.c
18 #define all_corrupt_bio_flags_match(bio, fc) \ argument
19 (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
247 static void flakey_map_bio(struct dm_target *ti, struct bio *bio) in flakey_map_bio() argument
251 bio->bi_bdev = fc->dev->bdev; in flakey_map_bio()
252 if (bio_sectors(bio)) in flakey_map_bio()
253 bio->bi_iter.bi_sector = in flakey_map_bio()
254 flakey_map_sector(ti, bio->bi_iter.bi_sector); in flakey_map_bio()
257 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) in corrupt_bio_data() argument
259 unsigned bio_bytes = bio_cur_bytes(bio); in corrupt_bio_data()
260 char *data = bio_data(bio); in corrupt_bio_data()
[all …]
multipath.c
77 struct bio *bio = mp_bh->master_bio; in multipath_end_bh_io() local
80 bio->bi_error = err; in multipath_end_bh_io()
81 bio_endio(bio); in multipath_end_bh_io()
85 static void multipath_end_request(struct bio *bio) in multipath_end_request() argument
87 struct multipath_bh *mp_bh = bio->bi_private; in multipath_end_request()
91 if (!bio->bi_error) in multipath_end_request()
93 else if (!(bio->bi_rw & REQ_RAHEAD)) { in multipath_end_request()
101 (unsigned long long)bio->bi_iter.bi_sector); in multipath_end_request()
104 multipath_end_bh_io(mp_bh, bio->bi_error); in multipath_end_request()
108 static void multipath_make_request(struct mddev *mddev, struct bio * bio) in multipath_make_request() argument
[all …]
dm-delay.c
64 static void flush_bios(struct bio *bio) in flush_bios() argument
66 struct bio *n; in flush_bios()
68 while (bio) { in flush_bios()
69 n = bio->bi_next; in flush_bios()
70 bio->bi_next = NULL; in flush_bios()
71 generic_make_request(bio); in flush_bios()
72 bio = n; in flush_bios()
76 static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all) in flush_delayed_bios()
86 struct bio *bio = dm_bio_from_per_bio_data(delayed, in flush_delayed_bios() local
89 bio_list_add(&flush_bios, bio); in flush_delayed_bios()
[all …]
raid10.c
84 #define IO_BLOCKED ((struct bio *)1)
89 #define IO_MADE_GOOD ((struct bio *)2)
91 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) argument
105 static void end_reshape_write(struct bio *bio);
143 struct bio *bio; in r10buf_pool_alloc() local
161 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r10buf_pool_alloc()
162 if (!bio) in r10buf_pool_alloc()
164 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
167 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r10buf_pool_alloc()
168 if (!bio) in r10buf_pool_alloc()
[all …]
dm-snap.c
208 struct bio *full_bio;
225 static void init_tracked_chunk(struct bio *bio) in init_tracked_chunk() argument
227 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in init_tracked_chunk()
231 static bool is_bio_tracked(struct bio *bio) in is_bio_tracked() argument
233 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in is_bio_tracked()
237 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) in track_chunk() argument
239 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in track_chunk()
249 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) in stop_tracking_chunk() argument
251 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in stop_tracking_chunk()
845 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) in __release_queued_bios_after_merge()
[all …]
faulty.c
73 static void faulty_fail(struct bio *bio) in faulty_fail() argument
75 struct bio *b = bio->bi_private; in faulty_fail()
77 b->bi_iter.bi_size = bio->bi_iter.bi_size; in faulty_fail()
78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector; in faulty_fail()
80 bio_put(bio); in faulty_fail()
173 static void make_request(struct mddev *mddev, struct bio *bio) in make_request() argument
178 if (bio_data_dir(bio) == WRITE) { in make_request()
184 bio_io_error(bio); in make_request()
188 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request()
189 bio_end_sector(bio), WRITE)) in make_request()
[all …]
dm-io.c
89 static void store_io_and_region_in_bio(struct bio *bio, struct io *io, in store_io_and_region_in_bio() argument
97 bio->bi_private = (void *)((unsigned long)io | region); in store_io_and_region_in_bio()
100 static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, in retrieve_io_and_region_from_bio() argument
103 unsigned long val = (unsigned long)bio->bi_private; in retrieve_io_and_region_from_bio()
136 static void endio(struct bio *bio) in endio() argument
142 if (bio->bi_error && bio_data_dir(bio) == READ) in endio()
143 zero_fill_bio(bio); in endio()
148 retrieve_io_and_region_from_bio(bio, &io, &region); in endio()
150 error = bio->bi_error; in endio()
151 bio_put(bio); in endio()
[all …]
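
store_io_and_region_in_bio() above packs a small region number into the low bits of the io pointer kept in bi_private, trading the pointer's alignment bits for a second field. A generic sketch of the trick, assuming two spare alignment bits (i.e. at least 4-byte-aligned allocations):

    #include <linux/bug.h>

    #define REGION_MASK 3UL    /* assumption: 2 low bits are free */

    static inline void *pack_io_region(void *io, unsigned long region)
    {
        BUG_ON((unsigned long)io & REGION_MASK);    /* bits must be free */
        BUG_ON(region > REGION_MASK);
        return (void *)((unsigned long)io | region);
    }

    static inline void unpack_io_region(void *tagged, void **io,
                                        unsigned long *region)
    {
        unsigned long val = (unsigned long)tagged;

        *io = (void *)(val & ~REGION_MASK);
        *region = val & REGION_MASK;
    }
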
dm-stripe.c
261 static int stripe_map_range(struct stripe_c *sc, struct bio *bio, in stripe_map_range() argument
266 stripe_map_range_sector(sc, bio->bi_iter.bi_sector, in stripe_map_range()
268 stripe_map_range_sector(sc, bio_end_sector(bio), in stripe_map_range()
271 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; in stripe_map_range()
272 bio->bi_iter.bi_sector = begin + in stripe_map_range()
274 bio->bi_iter.bi_size = to_bytes(end - begin); in stripe_map_range()
278 bio_endio(bio); in stripe_map_range()
283 static int stripe_map(struct dm_target *ti, struct bio *bio) in stripe_map() argument
289 if (bio->bi_rw & REQ_FLUSH) { in stripe_map()
290 target_bio_nr = dm_bio_get_target_bio_nr(bio); in stripe_map()
[all …]
linear.c
217 static void linear_make_request(struct mddev *mddev, struct bio *bio) in linear_make_request() argument
221 struct bio *split; in linear_make_request()
224 if (unlikely(bio->bi_rw & REQ_FLUSH)) { in linear_make_request()
225 md_flush_request(mddev, bio); in linear_make_request()
230 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); in linear_make_request()
234 bio->bi_bdev = tmp_dev->rdev->bdev; in linear_make_request()
236 if (unlikely(bio->bi_iter.bi_sector >= end_sector || in linear_make_request()
237 bio->bi_iter.bi_sector < start_sector)) in linear_make_request()
240 if (unlikely(bio_end_sector(bio) > end_sector)) { in linear_make_request()
244 split = bio_split(bio, end_sector - in linear_make_request()
[all …]
dm-linear.c
85 static void linear_map_bio(struct dm_target *ti, struct bio *bio) in linear_map_bio() argument
89 bio->bi_bdev = lc->dev->bdev; in linear_map_bio()
90 if (bio_sectors(bio)) in linear_map_bio()
91 bio->bi_iter.bi_sector = in linear_map_bio()
92 linear_map_sector(ti, bio->bi_iter.bi_sector); in linear_map_bio()
95 static int linear_map(struct dm_target *ti, struct bio *bio) in linear_map() argument
97 linear_map_bio(ti, bio); in linear_map()
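
linear_map_bio() above is the whole fast path of a simple dm target: point the bio at the backing device, shift the sector, and let dm core resubmit it. The equivalent map callback as a sketch (struct linear_c here is a stand-in mirroring the fields the fragment dereferences):

    #include <linux/device-mapper.h>

    struct linear_c {              /* stand-in for dm-linear's context */
        struct dm_dev *dev;
        sector_t start;
    };

    static int my_linear_map(struct dm_target *ti, struct bio *bio)
    {
        struct linear_c *lc = ti->private;

        bio->bi_bdev = lc->dev->bdev;
        if (bio_sectors(bio))      /* empty flushes carry no data */
            bio->bi_iter.bi_sector = lc->start +
                dm_target_offset(ti, bio->bi_iter.bi_sector);

        return DM_MAPIO_REMAPPED;  /* dm core submits the remapped bio */
    }
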
dm.c
73 struct bio *bio; member
104 struct bio *orig;
106 struct bio clone;
216 struct bio flush_bio;
676 struct bio *bio = io->bio; in start_io_acct() local
678 int rw = bio_data_dir(bio); in start_io_acct()
689 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in start_io_acct()
690 bio_sectors(bio), false, 0, &io->stats_aux); in start_io_acct()
696 struct bio *bio = io->bio; in end_io_acct() local
699 int rw = bio_data_dir(bio); in end_io_acct()
[all …]
dm-zero.c
36 static int zero_map(struct dm_target *ti, struct bio *bio) in zero_map() argument
38 switch(bio_rw(bio)) { in zero_map()
40 zero_fill_bio(bio); in zero_map()
50 bio_endio(bio); in zero_map()
dm-verity.c
350 struct bio *bio = dm_bio_from_per_bio_data(io, in verity_verify_io() local
405 struct bio_vec bv = bio_iter_iovec(bio, io->iter); in verity_verify_io()
419 bio_advance_iter(bio, &io->iter, len); in verity_verify_io()
453 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size); in verity_finish_io() local
455 bio->bi_end_io = io->orig_bi_end_io; in verity_finish_io()
456 bio->bi_private = io->orig_bi_private; in verity_finish_io()
457 bio->bi_error = error; in verity_finish_io()
459 bio_endio(bio); in verity_finish_io()
469 static void verity_end_io(struct bio *bio) in verity_end_io() argument
471 struct dm_verity_io *io = bio->bi_private; in verity_end_io()
[all …]
dm-region-hash.c
127 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) in dm_rh_bio_to_region() argument
129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - in dm_rh_bio_to_region()
393 void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) in dm_rh_mark_nosync() argument
398 region_t region = dm_rh_bio_to_region(rh, bio); in dm_rh_mark_nosync()
401 if (bio->bi_rw & REQ_FLUSH) { in dm_rh_mark_nosync()
406 if (bio->bi_rw & REQ_DISCARD) in dm_rh_mark_nosync()
526 struct bio *bio; in dm_rh_inc_pending() local
528 for (bio = bios->head; bio; bio = bio->bi_next) { in dm_rh_inc_pending()
529 if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)) in dm_rh_inc_pending()
531 rh_inc(rh, dm_rh_bio_to_region(rh, bio)); in dm_rh_inc_pending()
[all …]
raid0.c
442 unsigned int chunk_sects, struct bio *bio) in is_io_in_chunk_boundary() argument
446 ((bio->bi_iter.bi_sector & (chunk_sects-1)) in is_io_in_chunk_boundary()
447 + bio_sectors(bio)); in is_io_in_chunk_boundary()
449 sector_t sector = bio->bi_iter.bi_sector; in is_io_in_chunk_boundary()
451 + bio_sectors(bio)); in is_io_in_chunk_boundary()
455 static void raid0_make_request(struct mddev *mddev, struct bio *bio) in raid0_make_request() argument
459 struct bio *split; in raid0_make_request()
461 if (unlikely(bio->bi_rw & REQ_FLUSH)) { in raid0_make_request()
462 md_flush_request(mddev, bio); in raid0_make_request()
467 sector_t sector = bio->bi_iter.bi_sector; in raid0_make_request()
[all …]
dm-bufio.c
150 struct bio bio; member
548 b->bio.bi_error = error ? -EIO : 0; in dmio_complete()
549 b->bio.bi_end_io(&b->bio); in dmio_complete()
576 b->bio.bi_end_io = end_io; in use_dmio()
580 b->bio.bi_error = r; in use_dmio()
581 end_io(&b->bio); in use_dmio()
585 static void inline_endio(struct bio *bio) in inline_endio() argument
587 bio_end_io_t *end_fn = bio->bi_private; in inline_endio()
588 int error = bio->bi_error; in inline_endio()
594 bio_reset(bio); in inline_endio()
[all …]
multipath.h
26 struct bio *master_bio;
27 struct bio bio; member
raid10.h
109 struct bio *master_bio;
125 struct bio *bio; member
127 struct bio *repl_bio; /* used for resync and
dm-bio-prison.c
75 struct bio *holder, in __setup_new_cell()
109 struct bio *inmate, in __bio_detain()
146 struct bio *inmate, in bio_detain()
162 struct bio *inmate, in dm_bio_detain()
234 struct bio *bio; in dm_cell_error() local
239 while ((bio = bio_list_pop(&bios))) { in dm_cell_error()
240 bio->bi_error = error; in dm_cell_error()
241 bio_endio(bio); in dm_cell_error()
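
dm_cell_error() above drains a bio_list and fails each bio individually; the pop-and-endio idiom is the standard way to error out queued bios. As a standalone sketch (in 4.4 the error lives in bi_error and bio_endio() takes only the bio):

    #include <linux/bio.h>

    static void fail_all_bios(struct bio_list *bios, int error)
    {
        struct bio *bio;

        while ((bio = bio_list_pop(bios))) {
            bio->bi_error = error;    /* e.g. -EIO */
            bio_endio(bio);
        }
    }
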
raid5-cache.c
70 struct bio flush_bio;
104 struct bio *current_bio;/* current_bio accepting new data */
209 static void r5l_log_endio(struct bio *bio) in r5l_log_endio() argument
211 struct r5l_io_unit *io = bio->bi_private; in r5l_log_endio()
215 if (bio->bi_error) in r5l_log_endio()
218 bio_put(bio); in r5l_log_endio()
256 static struct bio *r5l_bio_alloc(struct r5l_log *log) in r5l_bio_alloc()
258 struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES); in r5l_bio_alloc() local
260 bio->bi_rw = WRITE; in r5l_bio_alloc()
261 bio->bi_bdev = log->rdev->bdev; in r5l_bio_alloc()
[all …]
md.h
461 struct bio *flush_bio;
504 void (*make_request)(struct mddev *mddev, struct bio *bio);
640 extern void md_write_start(struct mddev *mddev, struct bio *bi);
647 extern void md_flush_request(struct mddev *mddev, struct bio *bio);
672 extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
674 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
dm-era-target.c
1183 static dm_block_t get_block(struct era *era, struct bio *bio) in get_block() argument
1185 sector_t block_nr = bio->bi_iter.bi_sector; in get_block()
1195 static void remap_to_origin(struct era *era, struct bio *bio) in remap_to_origin() argument
1197 bio->bi_bdev = era->origin_dev->bdev; in remap_to_origin()
1229 struct bio *bio; in process_deferred_bios() local
1241 while ((bio = bio_list_pop(&deferred_bios))) { in process_deferred_bios()
1244 get_block(era, bio)); in process_deferred_bios()
1255 bio_list_add(&marked_bios, bio); in process_deferred_bios()
1265 while ((bio = bio_list_pop(&marked_bios))) in process_deferred_bios()
1266 bio_io_error(bio); in process_deferred_bios()
[all …]
raid5.h
244 struct bio req, rreq;
247 struct bio *toread, *read, *towrite, *written;
475 struct bio *retry_read_aligned; /* currently retrying aligned bios */
476 struct bio *retry_read_aligned_list; /* aligned bios retry list */
635 extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
dm-crypt.c
41 struct bio *bio_in;
42 struct bio *bio_out;
55 struct bio *base_bio;
185 static void clone_init(struct dm_crypt_io *, struct bio *);
805 struct bio *bio_out, struct bio *bio_in, in crypt_convert_init()
906 struct ablkcipher_request *req, struct bio *base_bio) in crypt_free_req()
968 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
987 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) in crypt_alloc_buffer()
990 struct bio *clone; in crypt_alloc_buffer()
1037 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) in crypt_free_buffer_pages()
[all …]
raid1.h
145 struct bio *master_bio;
159 struct bio *bios[0];
dm-cache-policy-internal.h
20 struct bio *bio, struct policy_locker *locker, in policy_map() argument
23 return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, locker, result); in policy_map()
dm-bio-prison.h
45 struct bio *holder;
83 struct bio *inmate,
dm-cache-policy.h
137 struct bio *bio, struct policy_locker *locker,
dm-switch.c
319 static int switch_map(struct dm_target *ti, struct bio *bio) in switch_map() argument
322 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); in switch_map()
325 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; in switch_map()
326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; in switch_map()
dm-target.c
129 static int io_err_map(struct dm_target *tt, struct bio *bio) in io_err_map() argument
dm-cache-policy-mq.c
74 static void iot_update_stats(struct io_tracker *t, struct bio *bio) in iot_update_stats() argument
76 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) in iot_update_stats()
91 t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1); in iot_update_stats()
113 static void iot_examine_bio(struct io_tracker *t, struct bio *bio) in iot_examine_bio() argument
115 iot_update_stats(t, bio); in iot_examine_bio()
1029 struct bio *bio, struct policy_locker *locker, in mq_map() argument
1044 iot_examine_bio(&mq->tracker, bio); in mq_map()
1046 bio_data_dir(bio), locker, result); in mq_map()
raid5.c
138 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) in r5_next_bio() argument
140 int sectors = bio_sectors(bio); in r5_next_bio()
141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio()
142 return bio->bi_next; in r5_next_bio()
151 static inline int raid5_bi_processed_stripes(struct bio *bio) in raid5_bi_processed_stripes() argument
153 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; in raid5_bi_processed_stripes()
157 static inline int raid5_dec_bi_active_stripes(struct bio *bio) in raid5_dec_bi_active_stripes() argument
159 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; in raid5_dec_bi_active_stripes()
163 static inline void raid5_inc_bi_active_stripes(struct bio *bio) in raid5_inc_bi_active_stripes() argument
165 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; in raid5_inc_bi_active_stripes()
[all …]
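
The raid5.c helpers above reuse the 32-bit bi_phys_segments field as an atomic pair of 16-bit counters while raid5 owns the bio; judging from the shift in raid5_bi_processed_stripes(), active stripes sit in the low half and processed stripes in the high half. A sketch of the implied accessors (an inference from the fragments, not a verbatim copy):

    #include <linux/bio.h>
    #include <linux/atomic.h>

    static inline atomic_t *bi_segments(struct bio *bio)
    {
        return (atomic_t *)&bio->bi_phys_segments;
    }

    static inline int active_stripes(struct bio *bio)
    {
        return atomic_read(bi_segments(bio)) & 0xffff;          /* low 16 */
    }

    static inline int processed_stripes(struct bio *bio)
    {
        return (atomic_read(bi_segments(bio)) >> 16) & 0xffff;  /* high 16 */
    }
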
dm-cache-policy-smq.c
1140 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e, struct bio *bio, in should_promote() argument
1143 if (bio_data_dir(bio) == WRITE) { in should_promote()
1190 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b, struct bio *bio) in update_hotspot_queue() argument
1232 static int map(struct smq_policy *mq, struct bio *bio, dm_oblock_t oblock, in map() argument
1239 hs_e = update_hotspot_queue(mq, oblock, bio); in map()
1252 pr = should_promote(mq, hs_e, bio, fast_promote); in map()
1295 struct bio *bio, struct policy_locker *locker, in smq_map() argument
1305 r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result); in smq_map()
md.c
164 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, in bio_alloc_mddev()
167 struct bio *b; in bio_alloc_mddev()
179 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, in bio_clone_mddev() argument
183 return bio_clone(bio, gfp_mask); in bio_clone_mddev()
185 return bio_clone_bioset(bio, gfp_mask, mddev->bio_set); in bio_clone_mddev()
253 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) in md_make_request() argument
255 const int rw = bio_data_dir(bio); in md_make_request()
260 blk_queue_split(q, &bio, q->bio_split); in md_make_request()
264 bio_io_error(bio); in md_make_request()
268 if (bio_sectors(bio) != 0) in md_make_request()
[all …]
/linux-4.4.14/mm/
page_io.c
27 static struct bio *get_swap_bio(gfp_t gfp_flags, in get_swap_bio()
30 struct bio *bio; in get_swap_bio() local
32 bio = bio_alloc(gfp_flags, 1); in get_swap_bio()
33 if (bio) { in get_swap_bio()
34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); in get_swap_bio()
35 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; in get_swap_bio()
36 bio->bi_end_io = end_io; in get_swap_bio()
38 bio_add_page(bio, page, PAGE_SIZE, 0); in get_swap_bio()
39 BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE); in get_swap_bio()
41 return bio; in get_swap_bio()
[all …]
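
get_swap_bio() above is the canonical recipe for a one-page bio: allocate with room for a single vec, set device, sector and completion callback, then attach the page. The same recipe as a sketch with the inputs passed in directly (function name illustrative; 4.4-era API):

    #include <linux/bio.h>

    static struct bio *alloc_one_page_bio(struct block_device *bdev,
                                          sector_t sector, struct page *page,
                                          bio_end_io_t end_io)
    {
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);

        if (!bio)
            return NULL;

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = end_io;

        bio_add_page(bio, page, PAGE_SIZE, 0);    /* one full page */
        return bio;
    }
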
/linux-4.4.14/drivers/target/
target_core_iblock.c
295 static void iblock_bio_done(struct bio *bio) in iblock_bio_done() argument
297 struct se_cmd *cmd = bio->bi_private; in iblock_bio_done()
300 if (bio->bi_error) { in iblock_bio_done()
301 pr_err("bio error: %p, err: %d\n", bio, bio->bi_error); in iblock_bio_done()
309 bio_put(bio); in iblock_bio_done()
314 static struct bio *
318 struct bio *bio; in iblock_get_bio() local
327 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); in iblock_get_bio()
328 if (!bio) { in iblock_get_bio()
333 bio->bi_bdev = ib_dev->ibd_bd; in iblock_get_bio()
[all …]
target_core_pscsi.c
855 static void pscsi_bi_endio(struct bio *bio) in pscsi_bi_endio() argument
857 bio_put(bio); in pscsi_bi_endio()
860 static inline struct bio *pscsi_get_bio(int nr_vecs) in pscsi_get_bio()
862 struct bio *bio; in pscsi_get_bio() local
867 bio = bio_kmalloc(GFP_KERNEL, nr_vecs); in pscsi_get_bio()
868 if (!bio) { in pscsi_get_bio()
872 bio->bi_end_io = pscsi_bi_endio; in pscsi_get_bio()
874 return bio; in pscsi_get_bio()
879 enum dma_data_direction data_direction, struct bio **hbio) in pscsi_map_sg()
882 struct bio *bio = NULL, *tbio = NULL; in pscsi_map_sg() local
[all …]
/linux-4.4.14/fs/nfs/blocklayout/
blocklayout.c
104 static struct bio *
105 bl_submit_bio(int rw, struct bio *bio) in bl_submit_bio() argument
107 if (bio) { in bl_submit_bio()
108 get_parallel(bio->bi_private); in bl_submit_bio()
110 rw == READ ? "read" : "write", bio->bi_iter.bi_size, in bl_submit_bio()
111 (unsigned long long)bio->bi_iter.bi_sector); in bl_submit_bio()
112 submit_bio(rw, bio); in bl_submit_bio()
117 static struct bio *
121 struct bio *bio; in bl_alloc_init_bio() local
124 bio = bio_alloc(GFP_NOIO, npg); in bl_alloc_init_bio()
[all …]
/linux-4.4.14/drivers/block/rsxx/
dev.c
59 struct bio *bio; member
113 static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio) in disk_stats_start() argument
115 generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio), in disk_stats_start()
120 struct bio *bio, in disk_stats_complete() argument
123 generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0, in disk_stats_complete()
138 disk_stats_complete(card, meta->bio, meta->start_time); in bio_dma_done_cb()
141 bio_io_error(meta->bio); in bio_dma_done_cb()
143 bio_endio(meta->bio); in bio_dma_done_cb()
148 static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) in rsxx_make_request() argument
154 blk_queue_split(q, &bio, q->bio_split); in rsxx_make_request()
[all …]
dma.c
681 struct bio *bio, in rsxx_dma_queue_bio() argument
700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ in rsxx_dma_queue_bio()
708 if (bio->bi_rw & REQ_DISCARD) { in rsxx_dma_queue_bio()
709 bv_len = bio->bi_iter.bi_size; in rsxx_dma_queue_bio()
726 bio_for_each_segment(bvec, bio, iter) { in rsxx_dma_queue_bio()
738 bio_data_dir(bio), in rsxx_dma_queue_bio()
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
lloop.c
131 struct bio *lo_bio;
132 struct bio *lo_biotail;
185 static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) in do_bio_lustrebacked()
197 struct bio *bio; in do_bio_lustrebacked() local
216 for (bio = head; bio != NULL; bio = bio->bi_next) { in do_bio_lustrebacked()
217 LASSERT(rw == bio->bi_rw); in do_bio_lustrebacked()
219 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; in do_bio_lustrebacked()
220 bio_for_each_segment(bvec, bio, iter) { in do_bio_lustrebacked()
270 static void loop_add_bio(struct lloop_device *lo, struct bio *bio) in loop_add_bio() argument
276 lo->lo_biotail->bi_next = bio; in loop_add_bio()
[all …]
/linux-4.4.14/fs/btrfs/
raid56.c
186 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
868 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
869 struct bio *next; in rbio_orig_end_io()
889 static void raid_write_end_io(struct bio *bio) in raid_write_end_io() argument
891 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io()
892 int err = bio->bi_error; in raid_write_end_io()
895 fail_bio_stripe(rbio, bio); in raid_write_end_io()
897 bio_put(bio); in raid_write_end_io()
1062 struct bio *last = bio_list->tail; in rbio_add_io_page()
1065 struct bio *bio; in rbio_add_io_page() local
[all …]
raid56.h
45 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
48 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
55 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
62 raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
compression.c
75 struct bio *orig_bio;
97 static struct bio *compressed_bio_alloc(struct block_device *bdev, in compressed_bio_alloc()
152 static void end_compressed_bio_read(struct bio *bio) in end_compressed_bio_read() argument
154 struct compressed_bio *cb = bio->bi_private; in end_compressed_bio_read()
160 if (bio->bi_error) in end_compressed_bio_read()
171 (u64)bio->bi_iter.bi_sector << 9); in end_compressed_bio_read()
217 bio_put(bio); in end_compressed_bio_read()
266 static void end_compressed_bio_write(struct bio *bio) in end_compressed_bio_write() argument
269 struct compressed_bio *cb = bio->bi_private; in end_compressed_bio_write()
274 if (bio->bi_error) in end_compressed_bio_write()
[all …]
check-integrity.h
24 void btrfsic_submit_bio(int rw, struct bio *bio);
25 int btrfsic_submit_bio_wait(int rw, struct bio *bio);
scrub.c
97 struct bio *bio; member
277 static void scrub_bio_end_io(struct bio *bio);
294 static void scrub_wr_bio_end_io(struct bio *bio);
435 bio_put(sbio->bio); in scrub_free_ctx()
1427 static void scrub_bio_wait_endio(struct bio *bio) in scrub_bio_wait_endio() argument
1429 struct scrub_bio_ret *ret = bio->bi_private; in scrub_bio_wait_endio()
1431 ret->error = bio->bi_error; in scrub_bio_wait_endio()
1442 struct bio *bio, in scrub_submit_raid56_bio_wait() argument
1450 bio->bi_iter.bi_sector = page->logical >> 9; in scrub_submit_raid56_bio_wait()
1451 bio->bi_private = &done; in scrub_submit_raid56_bio_wait()
[all …]
extent_io.c
120 struct bio *bio; member
177 offsetof(struct btrfs_io_bio, bio)); in extent_io_init()
2098 struct bio *bio; in repair_io_failure() local
2113 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); in repair_io_failure()
2114 if (!bio) in repair_io_failure()
2116 bio->bi_iter.bi_size = 0; in repair_io_failure()
2122 bio_put(bio); in repair_io_failure()
2127 bio->bi_iter.bi_sector = sector; in repair_io_failure()
2131 bio_put(bio); in repair_io_failure()
2134 bio->bi_bdev = dev->bdev; in repair_io_failure()
[all …]
volumes.h
33 struct bio *head;
34 struct bio *tail;
128 struct bio *flush_bio;
275 typedef void (btrfs_io_bio_end_io_t) (struct btrfs_io_bio *bio, int err);
284 struct bio bio; member
287 static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio) in btrfs_io_bio() argument
289 return container_of(bio, struct btrfs_io_bio, bio); in btrfs_io_bio()
299 typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
307 struct bio *orig_bio;
440 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
check-integrity.c
168 bio_end_io_t *bio; member
339 struct bio *bio, int *bio_is_patched,
346 static void btrfsic_bio_end_io(struct bio *bp);
416 b->orig_bio_bh_end_io.bio = NULL; in btrfsic_block_init()
1674 struct bio *bio; in btrfsic_read_block() local
1677 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); in btrfsic_read_block()
1678 if (!bio) { in btrfsic_read_block()
1684 bio->bi_bdev = block_ctx->dev->bdev; in btrfsic_read_block()
1685 bio->bi_iter.bi_sector = dev_bytenr >> 9; in btrfsic_read_block()
1688 ret = bio_add_page(bio, block_ctx->pagev[j], in btrfsic_read_block()
[all …]
Dfile-item.c156 static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err) in btrfs_io_bio_endio_readpage() argument
158 kfree(bio->csum_allocated); in btrfs_io_bio_endio_readpage()
162 struct inode *inode, struct bio *bio, in __btrfs_lookup_bio_sums() argument
165 struct bio_vec *bvec = bio->bi_io_vec; in __btrfs_lookup_bio_sums()
166 struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio); in __btrfs_lookup_bio_sums()
185 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; in __btrfs_lookup_bio_sums()
204 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) in __btrfs_lookup_bio_sums()
207 WARN_ON(bio->bi_vcnt <= 0); in __btrfs_lookup_bio_sums()
220 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; in __btrfs_lookup_bio_sums()
223 while (bio_index < bio->bi_vcnt) { in __btrfs_lookup_bio_sums()
[all …]
Dextent_io.h67 struct bio *bio, int mirror_num,
77 size_t size, struct bio *bio,
347 struct bio *
350 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
351 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
386 int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
388 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
Ddisk-io.c80 struct bio *bio; member
117 struct bio *bio; member
705 static void end_workqueue_bio(struct bio *bio) in end_workqueue_bio() argument
707 struct btrfs_end_io_wq *end_io_wq = bio->bi_private; in end_workqueue_bio()
713 end_io_wq->error = bio->bi_error; in end_workqueue_bio()
715 if (bio->bi_rw & REQ_WRITE) { in end_workqueue_bio()
750 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, in btrfs_bio_wq_end_io() argument
759 end_io_wq->private = bio->bi_private; in btrfs_bio_wq_end_io()
760 end_io_wq->end_io = bio->bi_end_io; in btrfs_bio_wq_end_io()
763 end_io_wq->bio = bio; in btrfs_bio_wq_end_io()
[all …]
Ddisk-io.h123 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
126 int rw, struct bio *bio, int mirror_num,
Dinode.c1805 size_t size, struct bio *bio, in btrfs_merge_bio_hook() argument
1809 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_merge_bio_hook()
1817 length = bio->bi_iter.bi_size; in btrfs_merge_bio_hook()
1837 struct bio *bio, int mirror_num, in __btrfs_submit_bio_start() argument
1844 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); in __btrfs_submit_bio_start()
1857 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, in __btrfs_submit_bio_done() argument
1864 ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); in __btrfs_submit_bio_done()
1866 bio->bi_error = ret; in __btrfs_submit_bio_done()
1867 bio_endio(bio); in __btrfs_submit_bio_done()
1876 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, in btrfs_submit_bio_hook() argument
[all …]
Dcompression.h46 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
Dvolumes.c305 struct bio *head, struct bio *tail) in requeue_list()
308 struct bio *old_head; in requeue_list()
331 struct bio *pending; in run_scheduled_bios()
335 struct bio *tail; in run_scheduled_bios()
336 struct bio *cur; in run_scheduled_bios()
5854 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) in btrfs_end_bbio() argument
5856 bio->bi_private = bbio->private; in btrfs_end_bbio()
5857 bio->bi_end_io = bbio->end_io; in btrfs_end_bbio()
5858 bio_endio(bio); in btrfs_end_bbio()
5863 static void btrfs_end_bio(struct bio *bio) in btrfs_end_bio() argument
[all …]
Dbtrfs_inode.h296 struct bio *orig_bio;
299 struct bio *dio_bio;
/linux-4.4.14/fs/nilfs2/
Dsegbuf.c35 struct bio *bio; member
341 static void nilfs_end_bio_write(struct bio *bio) in nilfs_end_bio_write() argument
343 struct nilfs_segment_buffer *segbuf = bio->bi_private; in nilfs_end_bio_write()
345 if (bio->bi_error) in nilfs_end_bio_write()
348 bio_put(bio); in nilfs_end_bio_write()
355 struct bio *bio = wi->bio; in nilfs_segbuf_submit_bio() local
363 bio_put(bio); in nilfs_segbuf_submit_bio()
369 bio->bi_end_io = nilfs_end_bio_write; in nilfs_segbuf_submit_bio()
370 bio->bi_private = segbuf; in nilfs_segbuf_submit_bio()
371 submit_bio(mode, bio); in nilfs_segbuf_submit_bio()
[all …]
/linux-4.4.14/drivers/lightnvm/
Drrpc.c22 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
93 static void rrpc_discard(struct rrpc *rrpc, struct bio *bio) in rrpc_discard() argument
95 sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG; in rrpc_discard()
96 sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE; in rrpc_discard()
106 bio_io_error(bio); in rrpc_discard()
248 static void rrpc_end_sync_bio(struct bio *bio) in rrpc_end_sync_bio() argument
250 struct completion *waiting = bio->bi_private; in rrpc_end_sync_bio()
252 if (bio->bi_error) in rrpc_end_sync_bio()
253 pr_err("nvm: gc request failed (%u).\n", bio->bi_error); in rrpc_end_sync_bio()
273 struct bio *bio; in rrpc_move_valid_pages() local
[all …]
Drrpc.h148 static inline sector_t rrpc_get_laddr(struct bio *bio) in rrpc_get_laddr() argument
150 return bio->bi_iter.bi_sector / NR_PHY_IN_LOG; in rrpc_get_laddr()
153 static inline unsigned int rrpc_get_pages(struct bio *bio) in rrpc_get_pages() argument
155 return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE; in rrpc_get_pages()
209 static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio, in rrpc_lock_rq() argument
212 sector_t laddr = rrpc_get_laddr(bio); in rrpc_lock_rq()
213 unsigned int pages = rrpc_get_pages(bio); in rrpc_lock_rq()
/linux-4.4.14/drivers/block/
Dumem.c110 struct bio *bio, *currentbio, **biotail; member
119 struct bio *bio, **biotail; member
330 page->bio = NULL; in reset_page()
331 page->biotail = &page->bio; in reset_page()
345 struct bio *bio; in add_bio() local
349 bio = card->currentbio; in add_bio()
350 if (!bio && card->bio) { in add_bio()
351 card->currentbio = card->bio; in add_bio()
352 card->current_iter = card->bio->bi_iter; in add_bio()
353 card->bio = card->bio->bi_next; in add_bio()
[all …]
Dpktcdvd.c546 struct bio *bio = bio_kmalloc(GFP_KERNEL, 1); in pkt_alloc_packet_data() local
547 if (!bio) in pkt_alloc_packet_data()
550 pkt->r_bios[i] = bio; in pkt_alloc_packet_data()
557 struct bio *bio = pkt->r_bios[i]; in pkt_alloc_packet_data() local
558 if (bio) in pkt_alloc_packet_data()
559 bio_put(bio); in pkt_alloc_packet_data()
581 struct bio *bio = pkt->r_bios[i]; in pkt_free_packet_data() local
582 if (bio) in pkt_free_packet_data()
583 bio_put(bio); in pkt_free_packet_data()
655 if (s <= tmp->bio->bi_iter.bi_sector) in pkt_rbtree_find()
[all …]
Dosdblk.c98 struct bio *bio; /* cloned bio */ member
252 static void bio_chain_put(struct bio *chain) in bio_chain_put()
254 struct bio *tmp; in bio_chain_put()
264 static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask) in bio_chain_clone()
266 struct bio *tmp, *new_chain = NULL, *tail = NULL; in bio_chain_clone()
303 struct bio *bio; in osdblk_rq_fn() local
329 bio = bio_chain_clone(rq->bio, GFP_ATOMIC); in osdblk_rq_fn()
330 if (!bio) in osdblk_rq_fn()
333 bio = NULL; in osdblk_rq_fn()
338 bio_chain_put(bio); in osdblk_rq_fn()
[all …]
Dnull_blk.c18 struct bio *bio; member
234 bio_endio(cmd->bio); in end_cmd()
311 static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) in null_queue_bio() argument
318 cmd->bio = bio; in null_queue_bio()
450 struct bio *bio = rqd->bio; in null_lnvm_submit_io() local
452 rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); in null_lnvm_submit_io()
457 rq->__sector = bio->bi_iter.bi_sector; in null_lnvm_submit_io()
458 rq->ioprio = bio_prio(bio); in null_lnvm_submit_io()
460 if (bio_has_data(bio)) in null_lnvm_submit_io()
461 rq->nr_phys_segments = bio_phys_segments(q, bio); in null_lnvm_submit_io()
[all …]
Dbrd.c326 static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) in brd_make_request() argument
328 struct block_device *bdev = bio->bi_bdev; in brd_make_request()
335 sector = bio->bi_iter.bi_sector; in brd_make_request()
336 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) in brd_make_request()
339 if (unlikely(bio->bi_rw & REQ_DISCARD)) { in brd_make_request()
341 bio->bi_iter.bi_size & ~PAGE_MASK) in brd_make_request()
343 discard_from_brd(brd, sector, bio->bi_iter.bi_size); in brd_make_request()
347 rw = bio_rw(bio); in brd_make_request()
351 bio_for_each_segment(bvec, bio, iter) { in brd_make_request()
363 bio_endio(bio); in brd_make_request()
[all …]
Dps3vram.c550 static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, in ps3vram_do_bio()
551 struct bio *bio) in ps3vram_do_bio() argument
554 int write = bio_data_dir(bio) == WRITE; in ps3vram_do_bio()
556 loff_t offset = bio->bi_iter.bi_sector << 9; in ps3vram_do_bio()
560 struct bio *next; in ps3vram_do_bio()
562 bio_for_each_segment(bvec, bio, iter) { in ps3vram_do_bio()
596 bio->bi_error = error; in ps3vram_do_bio()
597 bio_endio(bio); in ps3vram_do_bio()
601 static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio) in ps3vram_make_request() argument
609 blk_queue_split(q, &bio, q->bio_split); in ps3vram_make_request()
[all …]
Dxen-blkfront.c85 struct bio *bio; member
1637 static void split_bio_end(struct bio *bio) in split_bio_end() argument
1639 struct split_bio *split_bio = bio->bi_private; in split_bio_end()
1642 split_bio->bio->bi_phys_segments = 0; in split_bio_end()
1643 split_bio->bio->bi_error = bio->bi_error; in split_bio_end()
1644 bio_endio(split_bio->bio); in split_bio_end()
1647 bio_put(bio); in split_bio_end()
1656 struct bio *bio, *cloned_bio; in blkif_recover() local
1703 merge_bio.head = copy[i].request->bio; in blkif_recover()
1706 copy[i].request->bio = NULL; in blkif_recover()
[all …]
Dloop.c352 struct bio *bio; in lo_read_simple() local
354 __rq_for_each_bio(bio, rq) in lo_read_simple()
355 zero_fill_bio(bio); in lo_read_simple()
400 struct bio *bio; in lo_read_transfer() local
402 __rq_for_each_bio(bio, rq) in lo_read_transfer()
403 zero_fill_bio(bio); in lo_read_transfer()
454 struct bio *bio = cmd->rq->bio; in handle_partial_read() local
456 bio_advance(bio, bytes); in handle_partial_read()
457 zero_fill_bio(bio); in handle_partial_read()
481 struct bio *bio = cmd->rq->bio; in lo_rw_aio() local
[all …]
Dfloppy.c2354 raw_cmd->kernel_data == bio_data(current_req->bio)) { in rw_interrupt()
2373 base = bio_data(current_req->bio); in buffer_chain_size()
2643 } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { in make_raw_rw_request()
2657 ((unsigned long)bio_data(current_req->bio))) >> 9; in make_raw_rw_request()
2661 if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) in make_raw_rw_request()
2663 ((unsigned long)bio_data(current_req->bio)) % in make_raw_rw_request()
2680 raw_cmd->kernel_data = bio_data(current_req->bio); in make_raw_rw_request()
2734 (raw_cmd->kernel_data != bio_data(current_req->bio) && in make_raw_rw_request()
2742 if (raw_cmd->kernel_data != bio_data(current_req->bio)) in make_raw_rw_request()
2759 if (raw_cmd->kernel_data != bio_data(current_req->bio)) { in make_raw_rw_request()
[all …]
Dhd.c467 insw(HD_DATA, bio_data(req->bio), 256); in read_intr()
471 blk_rq_sectors(req) - 1, bio_data(req->bio)+512); in read_intr()
508 outsw(HD_DATA, bio_data(req->bio), 256); in write_intr()
627 cyl, head, sec, nsect, bio_data(req->bio)); in hd_request()
646 outsw(HD_DATA, bio_data(req->bio), 256); in hd_request()
Dvirtio_blk.c240 struct bio *bio; in virtblk_get_id() local
243 bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, in virtblk_get_id()
245 if (IS_ERR(bio)) in virtblk_get_id()
246 return PTR_ERR(bio); in virtblk_get_id()
248 req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); in virtblk_get_id()
250 bio_put(bio); in virtblk_get_id()
Drbd.c264 struct bio *bio_list;
1236 static void bio_chain_put(struct bio *chain) in bio_chain_put()
1238 struct bio *tmp; in bio_chain_put()
1250 static void zero_bio_chain(struct bio *chain, int start_ofs) in zero_bio_chain()
1311 static struct bio *bio_clone_range(struct bio *bio_src, in bio_clone_range()
1316 struct bio *bio; in bio_clone_range() local
1318 bio = bio_clone(bio_src, gfpmask); in bio_clone_range()
1319 if (!bio) in bio_clone_range()
1322 bio_advance(bio, offset); in bio_clone_range()
1323 bio->bi_iter.bi_size = len; in bio_clone_range()
[all …]
Dmg_disk.c482 u16 *buff = (u16 *)bio_data(req->bio); in mg_read_one()
499 blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio)); in mg_read()
517 u16 *buff = (u16 *)bio_data(req->bio); in mg_write_one()
537 rem, blk_rq_pos(req), bio_data(req->bio)); in mg_write()
588 blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio)); in mg_read_intr()
627 blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio)); in mg_write_intr()
/linux-4.4.14/fs/jfs/
Djfs_metapage.c279 static void metapage_read_end_io(struct bio *bio) in metapage_read_end_io() argument
281 struct page *page = bio->bi_private; in metapage_read_end_io()
283 if (bio->bi_error) { in metapage_read_end_io()
289 bio_put(bio); in metapage_read_end_io()
334 static void metapage_write_end_io(struct bio *bio) in metapage_write_end_io() argument
336 struct page *page = bio->bi_private; in metapage_write_end_io()
340 if (bio->bi_error) { in metapage_write_end_io()
345 bio_put(bio); in metapage_write_end_io()
350 struct bio *bio = NULL; in metapage_writepage() local
395 if (bio) { in metapage_writepage()
[all …]
Djfs_logmgr.c1987 struct bio *bio; in lbmRead() local
1998 bio = bio_alloc(GFP_NOFS, 1); in lbmRead()
2000 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmRead()
2001 bio->bi_bdev = log->bdev; in lbmRead()
2003 bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); in lbmRead()
2004 BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); in lbmRead()
2006 bio->bi_end_io = lbmIODone; in lbmRead()
2007 bio->bi_private = bp; in lbmRead()
2010 bio->bi_iter.bi_size = 0; in lbmRead()
2011 lbmIODone(bio); in lbmRead()
[all …]
/linux-4.4.14/drivers/block/drbd/
Ddrbd_req.h260 static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src) in drbd_req_make_private_bio()
262 struct bio *bio; in drbd_req_make_private_bio() local
263 bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */ in drbd_req_make_private_bio()
265 req->private_bio = bio; in drbd_req_make_private_bio()
267 bio->bi_private = req; in drbd_req_make_private_bio()
268 bio->bi_end_io = drbd_request_endio; in drbd_req_make_private_bio()
269 bio->bi_next = NULL; in drbd_req_make_private_bio()
276 struct bio *bio; member
306 if (m.bio) in _req_mod()
328 if (m.bio) in req_mod()
Ddrbd_req.c51 struct bio *bio_src) in drbd_req_new()
204 m->bio->bi_error = m->error; in complete_master_bio()
205 bio_endio(m->bio); in complete_master_bio()
296 m->bio = req->master_bio; in drbd_req_complete()
588 m->bio = NULL; in __req_mod()
1141 struct bio *bio = req->private_bio; in drbd_submit_req_private_bio() local
1142 const int rw = bio_rw(bio); in drbd_submit_req_private_bio()
1144 bio->bi_bdev = device->ldev->backing_bdev; in drbd_submit_req_private_bio()
1157 bio_io_error(bio); in drbd_submit_req_private_bio()
1159 generic_make_request(bio); in drbd_submit_req_private_bio()
[all …]
Ddrbd_worker.c68 void drbd_md_endio(struct bio *bio) in drbd_md_endio() argument
72 device = bio->bi_private; in drbd_md_endio()
73 device->md_io.error = bio->bi_error; in drbd_md_endio()
89 bio_put(bio); in drbd_md_endio()
173 void drbd_peer_request_endio(struct bio *bio) in drbd_peer_request_endio() argument
175 struct drbd_peer_request *peer_req = bio->bi_private; in drbd_peer_request_endio()
177 int is_write = bio_data_dir(bio) == WRITE; in drbd_peer_request_endio()
178 int is_discard = !!(bio->bi_rw & REQ_DISCARD); in drbd_peer_request_endio()
180 if (bio->bi_error && __ratelimit(&drbd_ratelimit_state)) in drbd_peer_request_endio()
183 : "read", bio->bi_error, in drbd_peer_request_endio()
[all …]
Ddrbd_bitmap.c944 static void drbd_bm_endio(struct bio *bio) in drbd_bm_endio() argument
946 struct drbd_bm_aio_ctx *ctx = bio->bi_private; in drbd_bm_endio()
949 unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); in drbd_bm_endio()
955 if (bio->bi_error) { in drbd_bm_endio()
958 ctx->error = bio->bi_error; in drbd_bm_endio()
964 bio->bi_error, idx); in drbd_bm_endio()
973 mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool); in drbd_bm_endio()
975 bio_put(bio); in drbd_bm_endio()
986 struct bio *bio = bio_alloc_drbd(GFP_NOIO); in bm_page_io_async() local
1015 bio->bi_bdev = device->ldev->md_bdev; in bm_page_io_async()
[all …]
Ddrbd_int.h303 struct bio *private_bio;
318 struct bio *master_bio; /* master bio pointer */
1425 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1450 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1451 extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
1483 extern void drbd_md_endio(struct bio *bio);
1484 extern void drbd_peer_request_endio(struct bio *bio);
1485 extern void drbd_request_endio(struct bio *bio);
1514 extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
1601 int fault_type, struct bio *bio) in drbd_generic_make_request() argument
[all …]
Ddrbd_actlog.c142 struct bio *bio; in _drbd_md_sync_page_io() local
154 bio = bio_alloc_drbd(GFP_NOIO); in _drbd_md_sync_page_io()
155 bio->bi_bdev = bdev->md_bdev; in _drbd_md_sync_page_io()
156 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io()
158 if (bio_add_page(bio, device->md_io.page, size, 0) != size) in _drbd_md_sync_page_io()
160 bio->bi_private = device; in _drbd_md_sync_page_io()
161 bio->bi_end_io = drbd_md_endio; in _drbd_md_sync_page_io()
162 bio->bi_rw = rw; in _drbd_md_sync_page_io()
174 bio_get(bio); /* one bio_put() is in the completion handler */ in _drbd_md_sync_page_io()
178 bio_io_error(bio); in _drbd_md_sync_page_io()
[all …]
/linux-4.4.14/Documentation/block/
Dbiovecs.txt7 As of 3.13, biovecs should never be modified after a bio has been submitted.
9 the iterator will be modified as the bio is completed, not the biovec.
11 More specifically, old code that needed to partially complete a bio would
17 partially complete a bio is segregated into struct bvec_iter: bi_sector,
37 wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
38 advances the bio integrity's iter if present.
41 a pointer to a biovec, not a bio; this is used by the bio integrity code.
50 exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
51 which copies the contents of one bio into another. Because the biovecs
61 * Before, any code that might need to use the biovec after the bio had been
[all …]
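
A minimal sketch of the iteration style biovecs.txt describes, assuming a 4.4-era driver context; count_bio_bytes() is a hypothetical helper. bio_for_each_segment() copies each segment out by value through a private struct bvec_iter, so the bio's own biovec array stays untouched after submission:

    #include <linux/bio.h>

    /* Walk a bio segment by segment without modifying its biovecs;
     * only the local iterator advances. */
    static unsigned int count_bio_bytes(struct bio *bio)
    {
            struct bio_vec bvec;    /* filled in by value per segment */
            struct bvec_iter iter;
            unsigned int bytes = 0;

            bio_for_each_segment(bvec, bio, iter)
                    bytes += bvec.bv_len;

            return bytes;
    }
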
Drequest.txt82 struct bio *bio DBI First bio in request
84 struct bio *biotail DBI Last bio in request
Ddata-integrity.txt131 The data integrity patches add a new field to struct bio when
132 CONFIG_BLK_DEV_INTEGRITY is enabled. bio_integrity(bio) returns a
133 pointer to a struct bip which contains the bio integrity payload.
134 Essentially a bip is a trimmed down struct bio which holds a bio_vec
138 A kernel subsystem can enable data integrity protection on a bio by
139 calling bio_integrity_alloc(bio). This will allocate and attach the
140 bip to the bio.
195 int bio_integrity_prep(bio);
198 filesystem must call bio_integrity_prep(bio).
200 Prior to calling this function, the bio data direction and start
[all …]
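
A hedged sketch of the prep step data-integrity.txt describes, mirroring the call pattern visible in the nd_blk_make_request() and btt_make_request() hits later in this listing (in 4.4, a nonzero return from bio_integrity_prep() means failure); submit_with_integrity() is a hypothetical wrapper:

    #include <linux/bio.h>

    /* The bio's data direction and start sector must already be set. */
    static void submit_with_integrity(int rw, struct bio *bio)
    {
            if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                    bio->bi_error = -EIO;   /* could not attach/verify the bip */
                    bio_endio(bio);
                    return;
            }
            submit_bio(rw, bio);
    }
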
Dbiodoc.txt15 context of the bio rewrite. The idea is to bring out some of the key
23 2.5 bio rewrite:
37 The following people helped with fixes/contributions to the bio patches
58 2.2 The bio struct in detail (multi-page io unit)
62 3.2 Generic bio helper routines
194 cases, a bounce bio representing a buffer from the supported memory range
256 The flags and rw fields in the bio structure can be used for some tuning
272 requests. Some bits in the bi_rw flags field in the bio structure are
303 For passing request data, the caller must build up a bio descriptor
305 bio segments or uses the block layer end*request* functions for i/o
[all …]
Dwriteback_cache_control.txt23 The REQ_FLUSH flag can be ORed into the r/w flags of a bio submitted from
27 storage before the flagged bio starts. In addition the REQ_FLUSH flag can be
28 set on an otherwise empty bio structure, which causes only an explicit cache
36 The REQ_FUA flag can be ORed into the r/w flags of a bio submitted from the
47 may both be set on a single bio.
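
A minimal sketch of the "otherwise empty bio" cache flush described above, assuming a sleepable context; it uses the 4.4 two-argument submit_bio_wait() seen elsewhere in this listing, and WRITE_FLUSH (WRITE with REQ_FLUSH ORed in). issue_cache_flush() is a hypothetical helper:

    #include <linux/bio.h>
    #include <linux/fs.h>

    /* Force volatile write caches to stable storage with a data-less bio. */
    static int issue_cache_flush(struct block_device *bdev)
    {
            struct bio *bio = bio_alloc(GFP_KERNEL, 0);     /* no data pages */
            int ret;

            bio->bi_bdev = bdev;
            ret = submit_bio_wait(WRITE_FLUSH, bio);        /* waits for completion */
            bio_put(bio);
            return ret;
    }
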
Dnull_blk.txt17 No block-layer (Known as bio-based)
19 - Directly accepts bio data structure and returns them.
60 defaults to 1 on single-queue and bio-based instances. For multi-queue,
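
A sketch of what "bio-based" means in practice: the driver registers its own make_request function and completes bios itself, bypassing the request/elevator machinery. The blk_qc_t/BLK_QC_T_NONE signature follows the 4.4 prototypes in the hits above; my_make_request() and my_alloc_queue() are hypothetical names:

    #include <linux/blkdev.h>

    /* Bio-based entry point: the driver owns the bio directly. */
    static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
    {
            /* ... service the bio, e.g. via bio_for_each_segment() ... */
            bio_endio(bio);                 /* complete it ourselves */
            return BLK_QC_T_NONE;
    }

    static struct request_queue *my_alloc_queue(void)
    {
            struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

            if (q)
                    blk_queue_make_request(q, my_make_request);
            return q;
    }
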
/linux-4.4.14/fs/f2fs/
Ddata.c30 static void f2fs_read_end_io(struct bio *bio) in f2fs_read_end_io() argument
35 if (f2fs_bio_encrypted(bio)) { in f2fs_read_end_io()
36 if (bio->bi_error) { in f2fs_read_end_io()
37 f2fs_release_crypto_ctx(bio->bi_private); in f2fs_read_end_io()
39 f2fs_end_io_crypto_work(bio->bi_private, bio); in f2fs_read_end_io()
44 bio_for_each_segment_all(bvec, bio, i) { in f2fs_read_end_io()
47 if (!bio->bi_error) { in f2fs_read_end_io()
55 bio_put(bio); in f2fs_read_end_io()
58 static void f2fs_write_end_io(struct bio *bio) in f2fs_write_end_io() argument
60 struct f2fs_sb_info *sbi = bio->bi_private; in f2fs_write_end_io()
[all …]
Dcrypto.c153 struct bio *bio = ctx->r.bio; in completion_pages() local
157 bio_for_each_segment_all(bv, bio, i) { in completion_pages()
169 bio_put(bio); in completion_pages()
172 void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio) in f2fs_end_io_crypto_work() argument
175 ctx->r.bio = bio; in f2fs_end_io_crypto_work()
Df2fs_crypto.h96 struct bio *bio; member
Df2fs.h692 struct bio *bio; /* bios to merge */ member
1294 static inline struct bio *f2fs_bio_alloc(int npages) in f2fs_bio_alloc()
1296 struct bio *bio; in f2fs_bio_alloc() local
1299 bio = bio_alloc(GFP_NOIO, npages); in f2fs_bio_alloc()
1300 if (!bio) in f2fs_bio_alloc()
1301 bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); in f2fs_bio_alloc()
1302 return bio; in f2fs_bio_alloc()
2103 static inline bool f2fs_bio_encrypted(struct bio *bio) in f2fs_bio_encrypted() argument
2106 return unlikely(bio->bi_private != NULL); in f2fs_bio_encrypted()
2148 void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *, struct bio *);
/linux-4.4.14/drivers/scsi/osd/
Dosd_initiator.c450 if (unlikely(rq->bio)) in _put_request()
718 struct bio *bio; in _osd_req_list_objects() local
725 WARN_ON(or->in.bio); in _osd_req_list_objects()
726 bio = bio_map_kern(q, list, len, or->alloc_flags); in _osd_req_list_objects()
727 if (IS_ERR(bio)) { in _osd_req_list_objects()
729 return PTR_ERR(bio); in _osd_req_list_objects()
732 bio->bi_rw &= ~REQ_WRITE; in _osd_req_list_objects()
733 or->in.bio = bio; in _osd_req_list_objects()
734 or->in.total_bytes = bio->bi_iter.bi_size; in _osd_req_list_objects()
826 struct bio *bio, u64 len) in osd_req_write() argument
[all …]
/linux-4.4.14/fs/gfs2/
Dlops.c205 static void gfs2_end_log_write(struct bio *bio) in gfs2_end_log_write() argument
207 struct gfs2_sbd *sdp = bio->bi_private; in gfs2_end_log_write()
212 if (bio->bi_error) { in gfs2_end_log_write()
213 sdp->sd_log_error = bio->bi_error; in gfs2_end_log_write()
214 fs_err(sdp, "Error %d writing to log\n", bio->bi_error); in gfs2_end_log_write()
217 bio_for_each_segment_all(bvec, bio, i) { in gfs2_end_log_write()
220 gfs2_end_log_write_bh(sdp, bvec, bio->bi_error); in gfs2_end_log_write()
225 bio_put(bio); in gfs2_end_log_write()
261 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno) in gfs2_log_alloc_bio()
264 struct bio *bio; in gfs2_log_alloc_bio() local
[all …]
Dops_fstype.c174 static void end_bio_io_page(struct bio *bio) in end_bio_io_page() argument
176 struct page *page = bio->bi_private; in end_bio_io_page()
178 if (!bio->bi_error) in end_bio_io_page()
181 pr_warn("error %d reading superblock\n", bio->bi_error); in end_bio_io_page()
232 struct bio *bio; in gfs2_read_super() local
242 bio = bio_alloc(GFP_NOFS, 1); in gfs2_read_super()
243 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); in gfs2_read_super()
244 bio->bi_bdev = sb->s_bdev; in gfs2_read_super()
245 bio_add_page(bio, page, PAGE_SIZE, 0); in gfs2_read_super()
247 bio->bi_end_io = end_bio_io_page; in gfs2_read_super()
[all …]
/linux-4.4.14/drivers/s390/block/
Dxpram.c184 static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) in xpram_make_request() argument
186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; in xpram_make_request()
193 blk_queue_split(q, &bio, q->bio_split); in xpram_make_request()
195 if ((bio->bi_iter.bi_sector & 7) != 0 || in xpram_make_request()
196 (bio->bi_iter.bi_size & 4095) != 0) in xpram_make_request()
199 if ((bio->bi_iter.bi_size >> 12) > xdev->size) in xpram_make_request()
202 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) in xpram_make_request()
204 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; in xpram_make_request()
205 bio_for_each_segment(bvec, bio, iter) { in xpram_make_request()
213 if (bio_data_dir(bio) == READ) { in xpram_make_request()
[all …]
Ddcssblk.c31 struct bio *bio);
820 dcssblk_make_request(struct request_queue *q, struct bio *bio) in dcssblk_make_request() argument
830 blk_queue_split(q, &bio, q->bio_split); in dcssblk_make_request()
833 dev_info = bio->bi_bdev->bd_disk->private_data; in dcssblk_make_request()
836 if ((bio->bi_iter.bi_sector & 7) != 0 || in dcssblk_make_request()
837 (bio->bi_iter.bi_size & 4095) != 0) in dcssblk_make_request()
840 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { in dcssblk_make_request()
851 if (bio_data_dir(bio) == WRITE) { in dcssblk_make_request()
860 index = (bio->bi_iter.bi_sector >> 3); in dcssblk_make_request()
861 bio_for_each_segment(bvec, bio, iter) { in dcssblk_make_request()
[all …]
Ddasd_diag.c61 struct dasd_diag_bio bio[0]; member
191 private->iob.bio_list = dreq->bio; in dasd_start_diag()
326 struct dasd_diag_bio bio; in dasd_diag_check_device() local
409 memset(&bio, 0, sizeof (struct dasd_diag_bio)); in dasd_diag_check_device()
410 bio.type = MDSK_READ_REQ; in dasd_diag_check_device()
411 bio.block_number = private->pt_block + 1; in dasd_diag_check_device()
412 bio.buffer = label; in dasd_diag_check_device()
419 private->iob.bio_list = &bio; in dasd_diag_check_device()
552 dbio = dreq->bio; in dasd_diag_build_cp()
/linux-4.4.14/Documentation/DocBook/
Dtracepoint.xml.db15 API-trace-block-bio-bounce
16 API-trace-block-bio-complete
17 API-trace-block-bio-backmerge
18 API-trace-block-bio-frontmerge
19 API-trace-block-bio-queue
25 API-trace-block-bio-remap
Dfilesystems.xml.db112 API-bio-reset
113 API-bio-chain
114 API-bio-alloc-bioset
115 API-bio-put
116 API---bio-clone-fast
117 API-bio-clone-fast
118 API-bio-clone-bioset
119 API-bio-add-pc-page
120 API-bio-add-page
121 API-submit-bio-wait
[all …]
D.filesystems.xml.cmd2 …c fs/super.c fs/locks.c fs/locks.c fs/mpage.c fs/namei.c fs/buffer.c block/bio.c fs/seq_file.c fs/…
/linux-4.4.14/drivers/scsi/
Dsd_dif.c117 struct bio *bio; in sd_dif_prepare() local
129 __rq_for_each_bio(bio, scmd->request) { in sd_dif_prepare()
130 struct bio_integrity_payload *bip = bio_integrity(bio); in sd_dif_prepare()
168 struct bio *bio; in sd_dif_complete() local
181 __rq_for_each_bio(bio, scmd->request) { in sd_dif_complete()
182 struct bio_integrity_payload *bip = bio_integrity(bio); in sd_dif_complete()
Dst.h32 struct bio *bio; member
/linux-4.4.14/include/scsi/
Dosd_initiator.h143 struct bio *bio; member
430 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
434 const struct osd_obj_id *, struct bio *data_out);/* NI */
436 const struct osd_obj_id *, struct bio *data_out, u64 offset);/* NI */
447 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
453 const struct osd_obj_id *obj, struct bio *bio,
456 const struct osd_obj_id *obj, struct bio *bio,
Dosd_sec.h40 void osd_sec_sign_data(void *data_integ, struct bio *bio, const u8 *cap_key);
Dosd_ore.h158 struct bio *bio; member
/linux-4.4.14/drivers/nvdimm/
Dblk.c164 static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) in nd_blk_make_request() argument
166 struct block_device *bdev = bio->bi_bdev; in nd_blk_make_request()
182 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { in nd_blk_make_request()
183 bio->bi_error = -EIO; in nd_blk_make_request()
187 bip = bio_integrity(bio); in nd_blk_make_request()
189 rw = bio_data_dir(bio); in nd_blk_make_request()
190 do_acct = nd_iostat_start(bio, &start); in nd_blk_make_request()
191 bio_for_each_segment(bvec, bio, iter) { in nd_blk_make_request()
202 bio->bi_error = err; in nd_blk_make_request()
207 nd_iostat_end(bio, start); in nd_blk_make_request()
[all …]
Dnd.h266 void __nd_iostat_start(struct bio *bio, unsigned long *start);
267 static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) in nd_iostat_start() argument
269 struct gendisk *disk = bio->bi_bdev->bd_disk; in nd_iostat_start()
274 __nd_iostat_start(bio, start); in nd_iostat_start()
277 void nd_iostat_end(struct bio *bio, unsigned long start);
Dcore.c217 void __nd_iostat_start(struct bio *bio, unsigned long *start) in __nd_iostat_start() argument
219 struct gendisk *disk = bio->bi_bdev->bd_disk; in __nd_iostat_start()
220 const int rw = bio_data_dir(bio); in __nd_iostat_start()
226 part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio)); in __nd_iostat_start()
232 void nd_iostat_end(struct bio *bio, unsigned long start) in nd_iostat_end() argument
234 struct gendisk *disk = bio->bi_bdev->bd_disk; in nd_iostat_end()
236 const int rw = bio_data_dir(bio); in nd_iostat_end()
Dpmem.c67 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) in pmem_make_request() argument
73 struct block_device *bdev = bio->bi_bdev; in pmem_make_request()
76 do_acct = nd_iostat_start(bio, &start); in pmem_make_request()
77 bio_for_each_segment(bvec, bio, iter) in pmem_make_request()
79 bio_data_dir(bio), iter.bi_sector); in pmem_make_request()
81 nd_iostat_end(bio, start); in pmem_make_request()
83 if (bio_data_dir(bio)) in pmem_make_request()
86 bio_endio(bio); in pmem_make_request()
Dbtt.c1153 static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) in btt_make_request() argument
1155 struct bio_integrity_payload *bip = bio_integrity(bio); in btt_make_request()
1169 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { in btt_make_request()
1170 bio->bi_error = -EIO; in btt_make_request()
1174 do_acct = nd_iostat_start(bio, &start); in btt_make_request()
1175 rw = bio_data_dir(bio); in btt_make_request()
1176 bio_for_each_segment(bvec, bio, iter) { in btt_make_request()
1192 bio->bi_error = err; in btt_make_request()
1197 nd_iostat_end(bio, start); in btt_make_request()
1200 bio_endio(bio); in btt_make_request()
/linux-4.4.14/kernel/trace/
Dblktrace.c770 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, in blk_add_trace_bio() argument
778 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, in blk_add_trace_bio()
779 bio->bi_rw, what, error, 0, NULL); in blk_add_trace_bio()
783 struct request_queue *q, struct bio *bio) in blk_add_trace_bio_bounce() argument
785 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); in blk_add_trace_bio_bounce()
789 struct request_queue *q, struct bio *bio, in blk_add_trace_bio_complete() argument
792 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); in blk_add_trace_bio_complete()
798 struct bio *bio) in blk_add_trace_bio_backmerge() argument
800 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); in blk_add_trace_bio_backmerge()
806 struct bio *bio) in blk_add_trace_bio_frontmerge() argument
[all …]
/linux-4.4.14/Documentation/ABI/testing/
Dsysfs-class-pktcdvd42 size (0444) Contains the size of the bio write
45 congestion_off (0644) If bio write queue size is below
46 this mark, accept new bio requests
49 congestion_on (0644) If bio write queue size is higher
51 bio write requests from the block
53 device has processed enough bio's
54 so that bio write queue size is
/linux-4.4.14/fs/exofs/
Dore.c47 (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),};
328 if (per_dev->bio) in ore_put_io_state()
329 bio_put(per_dev->bio); in ore_put_io_state()
405 static void _clear_bio(struct bio *bio) in _clear_bio() argument
410 bio_for_each_segment_all(bv, bio, i) { in _clear_bio()
440 per_dev->bio) { in ore_check_io()
445 _clear_bio(per_dev->bio); in ore_check_io()
603 if (per_dev->bio == NULL) { in _ore_add_stripe_unit()
615 per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size); in _ore_add_stripe_unit()
616 if (unlikely(!per_dev->bio)) { in _ore_add_stripe_unit()
[all …]
Dore_raid.c360 per_dev->bio = bio_kmalloc(GFP_KERNEL, in _add_to_r4w()
362 if (unlikely(!per_dev->bio)) { in _add_to_r4w()
375 added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len, in _add_to_r4w()
379 per_dev->bio->bi_vcnt); in _add_to_r4w()
435 struct bio *bio = ios->per_dev[d].bio; in _mark_read4write_pages_uptodate() local
437 if (!bio) in _mark_read4write_pages_uptodate()
440 bio_for_each_segment_all(bv, bio, i) { in _mark_read4write_pages_uptodate()
/linux-4.4.14/fs/hfsplus/
Dwrapper.c49 struct bio *bio; in hfsplus_submit_bio() local
65 bio = bio_alloc(GFP_NOIO, 1); in hfsplus_submit_bio()
66 bio->bi_iter.bi_sector = sector; in hfsplus_submit_bio()
67 bio->bi_bdev = sb->s_bdev; in hfsplus_submit_bio()
77 ret = bio_add_page(bio, virt_to_page(buf), len, page_offset); in hfsplus_submit_bio()
86 ret = submit_bio_wait(rw, bio); in hfsplus_submit_bio()
88 bio_put(bio); in hfsplus_submit_bio()
/linux-4.4.14/drivers/block/aoe/
Daoecmd.c297 skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter) in skb_fillup() argument
302 __bio_for_each_segment(bv, bio, iter, iter) in skb_fillup()
352 if (f->buf && bio_data_dir(f->buf->bio) == WRITE) { in ata_rw_frameinit()
353 skb_fillup(skb, f->buf->bio, f->iter); in ata_rw_frameinit()
389 bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size); in aoecmd_ata_rw()
848 struct bio *bio; in rqbiocnt() local
851 __rq_for_each_bio(bio, r) in rqbiocnt()
867 bio_pageinc(struct bio *bio) in bio_pageinc() argument
873 bio_for_each_segment(bv, bio, iter) { in bio_pageinc()
883 bio_pagedec(struct bio *bio) in bio_pagedec() argument
[all …]
Daoe.h103 struct bio *bio; member
176 struct bio *nxbio;
Daoedev.c164 struct bio *bio; in aoe_failip() local
172 while ((bio = d->ip.nxbio)) { in aoe_failip()
173 bio->bi_error = -EIO; in aoe_failip()
174 d->ip.nxbio = bio->bi_next; in aoe_failip()
/linux-4.4.14/arch/m68k/emu/
Dnfblock.c62 static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio) in nfhd_make_request() argument
68 sector_t sec = bio->bi_iter.bi_sector; in nfhd_make_request()
70 dir = bio_data_dir(bio); in nfhd_make_request()
72 bio_for_each_segment(bvec, bio, iter) { in nfhd_make_request()
79 bio_endio(bio); in nfhd_make_request()
/linux-4.4.14/include/linux/ceph/
Dmessenger.h99 struct bio *bio; member
123 struct bio *bio; /* bio from list */ member
290 extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
Dosd_client.h70 struct bio *bio; /* list of bios */ member
285 struct bio *bio, size_t bio_length);
/linux-4.4.14/fs/xfs/
Dxfs_aops.c361 struct bio *bio) in xfs_end_bio() argument
363 xfs_ioend_t *ioend = bio->bi_private; in xfs_end_bio()
366 ioend->io_error = bio->bi_error; in xfs_end_bio()
369 bio->bi_private = NULL; in xfs_end_bio()
370 bio->bi_end_io = NULL; in xfs_end_bio()
371 bio_put(bio); in xfs_end_bio()
380 struct bio *bio) in xfs_submit_ioend_bio() argument
383 bio->bi_private = ioend; in xfs_submit_ioend_bio()
384 bio->bi_end_io = xfs_end_bio; in xfs_submit_ioend_bio()
385 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio); in xfs_submit_ioend_bio()
[all …]
Dxfs_buf.c1105 struct bio *bio) in xfs_buf_bio_end_io() argument
1107 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; in xfs_buf_bio_end_io()
1113 if (bio->bi_error) { in xfs_buf_bio_end_io()
1116 bp->b_io_error = bio->bi_error; in xfs_buf_bio_end_io()
1125 bio_put(bio); in xfs_buf_bio_end_io()
1139 struct bio *bio; in xfs_buf_ioapply_map() local
1168 bio = bio_alloc(GFP_NOIO, nr_pages); in xfs_buf_ioapply_map()
1169 bio->bi_bdev = bp->b_target->bt_bdev; in xfs_buf_ioapply_map()
1170 bio->bi_iter.bi_sector = sector; in xfs_buf_ioapply_map()
1171 bio->bi_end_io = xfs_buf_bio_end_io; in xfs_buf_ioapply_map()
[all …]
/linux-4.4.14/arch/powerpc/sysdev/
Daxonram.c107 axon_ram_make_request(struct request_queue *queue, struct bio *bio) in axon_ram_make_request() argument
109 struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; in axon_ram_make_request()
116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << in axon_ram_make_request()
120 bio_for_each_segment(vec, bio, iter) { in axon_ram_make_request()
122 bio_io_error(bio); in axon_ram_make_request()
127 if (bio_data_dir(bio) == READ) in axon_ram_make_request()
135 bio_endio(bio); in axon_ram_make_request()
/linux-4.4.14/kernel/power/
Dswap.c230 static void hib_end_io(struct bio *bio) in hib_end_io() argument
232 struct hib_bio_batch *hb = bio->bi_private; in hib_end_io()
233 struct page *page = bio->bi_io_vec[0].bv_page; in hib_end_io()
235 if (bio->bi_error) { in hib_end_io()
237 imajor(bio->bi_bdev->bd_inode), in hib_end_io()
238 iminor(bio->bi_bdev->bd_inode), in hib_end_io()
239 (unsigned long long)bio->bi_iter.bi_sector); in hib_end_io()
242 if (bio_data_dir(bio) == WRITE) in hib_end_io()
245 if (bio->bi_error && !hb->error) in hib_end_io()
246 hb->error = bio->bi_error; in hib_end_io()
[all …]
/linux-4.4.14/drivers/scsi/libsas/
Dsas_host_smp.c239 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE || in sas_smp_host_handler()
240 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) { in sas_smp_host_handler()
258 buf = kmap_atomic(bio_page(req->bio)); in sas_smp_host_handler()
260 kunmap_atomic(buf - bio_offset(req->bio)); in sas_smp_host_handler()
373 buf = kmap_atomic(bio_page(rsp->bio)); in sas_smp_host_handler()
375 flush_kernel_dcache_page(bio_page(rsp->bio)); in sas_smp_host_handler()
376 kunmap_atomic(buf - bio_offset(rsp->bio)); in sas_smp_host_handler()
/linux-4.4.14/drivers/block/xen-blkback/
Dblkback.c1086 static void end_block_io_op(struct bio *bio) in end_block_io_op() argument
1088 __end_block_io_op(bio->bi_private, bio->bi_error); in end_block_io_op()
1089 bio_put(bio); in end_block_io_op()
1207 struct bio *bio = NULL; in dispatch_rw_block_io() local
1208 struct bio **biolist = pending_req->biolist; in dispatch_rw_block_io()
1335 while ((bio == NULL) || in dispatch_rw_block_io()
1336 (bio_add_page(bio, in dispatch_rw_block_io()
1342 bio = bio_alloc(GFP_KERNEL, nr_iovecs); in dispatch_rw_block_io()
1343 if (unlikely(bio == NULL)) in dispatch_rw_block_io()
1346 biolist[nbio++] = bio; in dispatch_rw_block_io()
[all …]
/linux-4.4.14/fs/ocfs2/cluster/
Dheartbeat.c376 static void o2hb_bio_end_io(struct bio *bio) in o2hb_bio_end_io() argument
378 struct o2hb_bio_wait_ctxt *wc = bio->bi_private; in o2hb_bio_end_io()
380 if (bio->bi_error) { in o2hb_bio_end_io()
381 mlog(ML_ERROR, "IO Error %d\n", bio->bi_error); in o2hb_bio_end_io()
382 wc->wc_error = bio->bi_error; in o2hb_bio_end_io()
386 bio_put(bio); in o2hb_bio_end_io()
391 static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, in o2hb_setup_one_bio()
401 struct bio *bio; in o2hb_setup_one_bio() local
408 bio = bio_alloc(GFP_ATOMIC, 16); in o2hb_setup_one_bio()
409 if (!bio) { in o2hb_setup_one_bio()
[all …]
/linux-4.4.14/drivers/nvme/host/
Dlightnvm.c450 rqd->bio->bi_iter.bi_sector)); in nvme_nvm_rqtocmd()
471 struct bio *bio = rqd->bio; in nvme_nvm_submit_io() local
474 rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); in nvme_nvm_submit_io()
485 rq->ioprio = bio_prio(bio); in nvme_nvm_submit_io()
487 if (bio_has_data(bio)) in nvme_nvm_submit_io()
488 rq->nr_phys_segments = bio_phys_segments(q, bio); in nvme_nvm_submit_io()
490 rq->__data_len = bio->bi_iter.bi_size; in nvme_nvm_submit_io()
491 rq->bio = rq->biotail = bio; in nvme_nvm_submit_io()
/linux-4.4.14/arch/xtensa/platforms/iss/
Dsimdisk.c104 static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio) in simdisk_make_request() argument
109 sector_t sector = bio->bi_iter.bi_sector; in simdisk_make_request()
111 bio_for_each_segment(bvec, bio, iter) { in simdisk_make_request()
112 char *buffer = __bio_kmap_atomic(bio, iter); in simdisk_make_request()
116 bio_data_dir(bio) == WRITE); in simdisk_make_request()
121 bio_endio(bio); in simdisk_make_request()
/linux-4.4.14/drivers/block/zram/
Dzram_drv.c779 int offset, struct bio *bio) in zram_bio_discard() argument
781 size_t n = bio->bi_iter.bi_size; in zram_bio_discard()
841 static void __zram_make_request(struct zram *zram, struct bio *bio) in __zram_make_request() argument
848 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; in __zram_make_request()
849 offset = (bio->bi_iter.bi_sector & in __zram_make_request()
852 if (unlikely(bio->bi_rw & REQ_DISCARD)) { in __zram_make_request()
853 zram_bio_discard(zram, index, offset, bio); in __zram_make_request()
854 bio_endio(bio); in __zram_make_request()
858 rw = bio_data_dir(bio); in __zram_make_request()
859 bio_for_each_segment(bvec, bio, iter) { in __zram_make_request()
[all …]
/linux-4.4.14/net/ceph/
Dmessenger.c829 struct bio *bio; in ceph_msg_data_bio_cursor_init() local
833 bio = data->bio; in ceph_msg_data_bio_cursor_init()
834 BUG_ON(!bio); in ceph_msg_data_bio_cursor_init()
837 cursor->bio = bio; in ceph_msg_data_bio_cursor_init()
838 cursor->bvec_iter = bio->bi_iter; in ceph_msg_data_bio_cursor_init()
840 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter); in ceph_msg_data_bio_cursor_init()
848 struct bio *bio; in ceph_msg_data_bio_next() local
853 bio = cursor->bio; in ceph_msg_data_bio_next()
854 BUG_ON(!bio); in ceph_msg_data_bio_next()
856 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter); in ceph_msg_data_bio_next()
[all …]
/linux-4.4.14/Documentation/device-mapper/
Ddm-io.txt36 The second I/O service type takes an array of bio vectors as the data buffer
37 for the I/O. This service can be handy if the caller has a pre-assembled bio,
38 but wants to direct different portions of the bio to different devices.
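
A hedged sketch of the bio-backed dm-io service described above, assuming the 4.4-era dm_io() request layout (a DM_IO_BIO memory type with the bio hung off mem.ptr.bio); the field names should be checked against include/linux/dm-io.h, and io_from_bio() is a hypothetical helper:

    #include <linux/bio.h>
    #include <linux/dm-io.h>

    /* Synchronous dm_io() fed from a pre-assembled bio; 'where' selects
     * the device and sector range this portion of the bio is sent to. */
    static int io_from_bio(struct dm_io_client *client, struct bio *bio,
                           struct block_device *bdev, sector_t sector)
    {
            struct dm_io_region where = {
                    .bdev   = bdev,
                    .sector = sector,
                    .count  = bio_sectors(bio),
            };
            struct dm_io_request req = {
                    .bi_rw       = WRITE,
                    .mem.type    = DM_IO_BIO,       /* buffer comes from the bio */
                    .mem.ptr.bio = bio,
                    .notify.fn   = NULL,            /* NULL => synchronous call */
                    .client      = client,          /* from dm_io_client_create() */
            };
            unsigned long error_bits;

            return dm_io(&req, 1, &where, &error_bits);
    }
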
Ddm-flakey.txt38 each matching bio with <value>.
45 <flags>: Perform the replacement only if bio->bi_rw has all the
/linux-4.4.14/Documentation/cgroups/
Dblkio-controller.txt204 - Number of IOs (bio) issued to the disk by the group. These
330 - Number of IOs (bio) issued to the disk by the group. These
427 address_space_operations->writepage[s]() to annotate bio's using the
430 * wbc_init_bio(@wbc, @bio)
432 Should be called for each bio carrying writeback data and associates
433 the bio with the inode's owner cgroup. Can be called anytime
434 between bio allocation and submission.
441 data segments are added to a bio.
443 With writeback bios annotated, cgroup support can be enabled per
449 wbc_init_bio() binds the specified bio to its cgroup. Depending on
[all …]
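
A minimal sketch of the annotation sequence blkio-controller.txt describes, assuming CONFIG_CGROUP_WRITEBACK and the 4.4 helpers wbc_init_bio() and wbc_account_io(); submit_wb_bio() is a hypothetical writepage-path helper:

    #include <linux/writeback.h>
    #include <linux/bio.h>

    /* Charge this writeback I/O to the cgroup that owns the inode. */
    static void submit_wb_bio(struct writeback_control *wbc,
                              struct bio *bio, struct page *page)
    {
            wbc_init_bio(wbc, bio);                /* bind bio to the owner cgroup */
            bio_add_page(bio, page, PAGE_SIZE, 0);
            wbc_account_io(wbc, page, PAGE_SIZE);  /* attribute the payload */
            submit_bio(WRITE, bio);
    }
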
