
Searched refs:bio (Results 1 – 200 of 247) sorted by relevance


/linux-4.1.27/include/linux/
Dbio.h51 #define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) argument
52 #define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) argument
54 #define bio_set_prio(bio, prio) do { \ argument
56 (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
57 (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
83 #define bio_iter_iovec(bio, iter) \ argument
84 bvec_iter_bvec((bio)->bi_io_vec, (iter))
86 #define bio_iter_page(bio, iter) \ argument
87 bvec_iter_page((bio)->bi_io_vec, (iter))
88 #define bio_iter_len(bio, iter) \ argument
[all …]
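The three prio macros above pack an ioprio value into the bits of bi_rw above BIO_PRIO_SHIFT. A minimal sketch of tagging a bio with an idle-class priority, the same pattern bcache's dirty_init() uses below; the function name is hypothetical:

```c
#include <linux/bio.h>
#include <linux/ioprio.h>

/* Tag a bio as idle-class, then read the value back (sketch). */
static void tag_bio_idle(struct bio *bio)
{
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	if (bio_prio_valid(bio))
		pr_debug("bio prio %lu\n", bio_prio(bio));
}
```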
Dblk_types.h11 struct bio;
17 typedef void (bio_end_io_t) (struct bio *, int);
18 typedef void (bio_destructor_t) (struct bio *);
46 struct bio { struct
47 struct bio *bi_next; /* request queue link */ argument
109 #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
133 #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) argument
142 #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) argument
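bi_flags does double duty here: the low bits are status flags tested via bio_flagged(), while the bits above BIO_POOL_OFFSET hold the bvec pool index (BIO_POOL_IDX). A sketch of the usual completion-side check under this 4.1-era API, where bio_end_io_t still takes an error argument; the callback name is invented:

```c
#include <linux/bio.h>

/* Completion callback: BIO_UPTODATE is cleared on failed I/O (sketch). */
static void my_end_io(struct bio *bio, int err)
{
	if (!bio_flagged(bio, BIO_UPTODATE))
		pr_err("I/O failed near sector %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
	bio_put(bio);
}
```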
Ddevice-mapper.h48 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
66 struct bio *bio, int error);
202 typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
298 struct bio clone;
301 static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) in dm_per_bio_data() argument
303 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; in dm_per_bio_data()
306 static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) in dm_bio_from_per_bio_data()
308 return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); in dm_bio_from_per_bio_data()
311 static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio) in dm_bio_get_target_bio_nr() argument
313 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
[all …]
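dm_per_bio_data() and dm_bio_from_per_bio_data() above are exact inverses, as the offsetof arithmetic shows. A sketch of a target using them, assuming the constructor set ti->per_bio_data_size = sizeof(struct my_ctx); the my_* names are hypothetical:

```c
#include <linux/device-mapper.h>
#include <linux/jiffies.h>

struct my_ctx {
	unsigned long start_jiffies;
};

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));

	ctx->start_jiffies = jiffies;
	/* The mapping is invertible: per-bio data pointer -> owning bio. */
	WARN_ON(dm_bio_from_per_bio_data(ctx, sizeof(struct my_ctx)) != bio);
	return DM_MAPIO_REMAPPED;
}
```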
Ddm-region-hash.h50 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
79 void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
81 void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
Delevator.h13 struct bio *);
19 typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
22 struct request *, struct bio *);
34 struct bio *, gfp_t);
124 extern int elv_merge(struct request_queue *, struct request **, struct bio *);
129 struct bio *);
138 struct bio *bio, gfp_t gfp_mask);
158 extern bool elv_rq_merge_ok(struct request *, struct bio *);
Dpktcdvd.h117 struct bio *w_bio; /* The bio we will send to the real CD */
132 struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
145 struct bio *bio; member
150 struct bio *bio; /* Original read request bio */ member
Dblkdev.h120 struct bio *bio; member
121 struct bio *biotail;
237 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
700 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) in blk_write_same_mergeable()
741 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
747 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio) in blk_queue_bounce() argument
763 struct bio *bio; member
770 if ((rq->bio)) \
771 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
774 __rq_for_each_bio(_iter.bio, _rq) \
[all …]
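__rq_for_each_bio above walks the bi_next chain anchored at rq->bio and capped by rq->biotail. A sketch counting the bios attached to a request; the helper name is made up:

```c
#include <linux/blkdev.h>

static unsigned int count_request_bios(struct request *rq)
{
	struct bio *bio;
	unsigned int n = 0;

	__rq_for_each_bio(bio, rq)	/* follows bio->bi_next until NULL */
		n++;
	return n;
}
```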
Dswap.h18 struct bio;
376 extern void end_swap_bio_write(struct bio *bio, int err);
378 void (*end_write_func)(struct bio *, int));
380 extern void end_swap_bio_read(struct bio *bio, int err);
Ddm-io.h44 struct bio *bio; member
Dfs.h2356 #define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK)) argument
2361 #define bio_data_dir(bio) ((bio)->bi_rw & 1) argument
2582 extern void submit_bio(int, struct bio *);
2645 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
2662 void dio_end_io(struct bio *bio, int error);
/linux-4.1.27/block/
Dbio.c73 unsigned int sz = sizeof(struct bio) + extra_size; in bio_find_or_create_slab()
237 static void __bio_free(struct bio *bio) in __bio_free() argument
239 bio_disassociate_task(bio); in __bio_free()
241 if (bio_integrity(bio)) in __bio_free()
242 bio_integrity_free(bio); in __bio_free()
245 static void bio_free(struct bio *bio) in bio_free() argument
247 struct bio_set *bs = bio->bi_pool; in bio_free()
250 __bio_free(bio); in bio_free()
253 if (bio_flagged(bio, BIO_OWNS_VEC)) in bio_free()
254 bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); in bio_free()
[all …]
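bio_free() above only returns bvecs to a mempool when bio->bi_pool is set, i.e. when the bio was allocated from a bio_set. A sketch of that allocation side, under the 4.1 signatures shown in this tree; the my_* names are hypothetical:

```c
#include <linux/bio.h>

static struct bio_set *my_bs;

static int my_init(void)
{
	my_bs = bioset_create(64, 0);	/* 64-entry pool, no front pad */
	if (!my_bs)
		return -ENOMEM;
	return 0;
}

static struct bio *my_alloc(void)
{
	/* bi_pool is set to my_bs, so bio_put() frees back into the set. */
	return bio_alloc_bioset(GFP_NOIO, 4, my_bs);
}
```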
Dblk-map.c13 struct bio *bio) in blk_rq_append_bio() argument
15 if (!rq->bio) in blk_rq_append_bio()
16 blk_rq_bio_prep(q, rq, bio); in blk_rq_append_bio()
17 else if (!ll_back_merge_fn(q, rq, bio)) in blk_rq_append_bio()
20 rq->biotail->bi_next = bio; in blk_rq_append_bio()
21 rq->biotail = bio; in blk_rq_append_bio()
23 rq->__data_len += bio->bi_iter.bi_size; in blk_rq_append_bio()
28 static int __blk_rq_unmap_user(struct bio *bio) in __blk_rq_unmap_user() argument
32 if (bio) { in __blk_rq_unmap_user()
33 if (bio_flagged(bio, BIO_USER_MAPPED)) in __blk_rq_unmap_user()
[all …]
Dblk-merge.c13 struct bio *bio, in __blk_recalc_rq_segments() argument
19 struct bio *fbio, *bbio; in __blk_recalc_rq_segments()
22 if (!bio) in __blk_recalc_rq_segments()
29 if (bio->bi_rw & REQ_DISCARD) in __blk_recalc_rq_segments()
32 if (bio->bi_rw & REQ_WRITE_SAME) in __blk_recalc_rq_segments()
35 fbio = bio; in __blk_recalc_rq_segments()
40 for_each_bio(bio) { in __blk_recalc_rq_segments()
41 bio_for_each_segment(bv, bio, iter) { in __blk_recalc_rq_segments()
78 bbio = bio; in __blk_recalc_rq_segments()
94 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
[all …]
Dbio-integrity.c45 struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, in bio_integrity_alloc() argument
50 struct bio_set *bs = bio->bi_pool; in bio_integrity_alloc()
80 bip->bip_bio = bio; in bio_integrity_alloc()
81 bio->bi_integrity = bip; in bio_integrity_alloc()
82 bio->bi_rw |= REQ_INTEGRITY; in bio_integrity_alloc()
98 void bio_integrity_free(struct bio *bio) in bio_integrity_free() argument
100 struct bio_integrity_payload *bip = bio_integrity(bio); in bio_integrity_free()
101 struct bio_set *bs = bio->bi_pool; in bio_integrity_free()
117 bio->bi_integrity = NULL; in bio_integrity_free()
130 int bio_integrity_add_page(struct bio *bio, struct page *page, in bio_integrity_add_page() argument
[all …]
Dbounce.c101 static void copy_to_high_bio_irq(struct bio *to, struct bio *from) in copy_to_high_bio_irq()
125 static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) in bounce_end_io() argument
127 struct bio *bio_orig = bio->bi_private; in bounce_end_io()
131 if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) in bounce_end_io()
137 bio_for_each_segment_all(bvec, bio, i) { in bounce_end_io()
147 bio_put(bio); in bounce_end_io()
150 static void bounce_end_io_write(struct bio *bio, int err) in bounce_end_io_write() argument
152 bounce_end_io(bio, page_pool, err); in bounce_end_io_write()
155 static void bounce_end_io_write_isa(struct bio *bio, int err) in bounce_end_io_write_isa() argument
158 bounce_end_io(bio, isa_page_pool, err); in bounce_end_io_write_isa()
[all …]
Dblk-lib.c18 static void bio_batch_end_io(struct bio *bio, int err) in bio_batch_end_io() argument
20 struct bio_batch *bb = bio->bi_private; in bio_batch_end_io()
26 bio_put(bio); in bio_batch_end_io()
49 struct bio *bio; in blkdev_issue_discard() local
89 bio = bio_alloc(gfp_mask, 1); in blkdev_issue_discard()
90 if (!bio) { in blkdev_issue_discard()
111 bio->bi_iter.bi_sector = sector; in blkdev_issue_discard()
112 bio->bi_end_io = bio_batch_end_io; in blkdev_issue_discard()
113 bio->bi_bdev = bdev; in blkdev_issue_discard()
114 bio->bi_private = &bb; in blkdev_issue_discard()
[all …]
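blkdev_issue_discard() above batches discard bios internally and waits on a bio_batch, so the caller sees a single blocking call. A sketch discarding the first megabyte of a device (sector counts are 512-byte units; the wrapper name is invented):

```c
#include <linux/blkdev.h>

static int discard_first_mb(struct block_device *bdev)
{
	/* 2048 x 512-byte sectors = 1 MiB, starting at sector 0. */
	return blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
}
```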
Dblk-core.c117 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
121 clear_bit(BIO_UPTODATE, &bio->bi_flags); in req_bio_endio()
122 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) in req_bio_endio()
126 set_bit(BIO_QUIET, &bio->bi_flags); in req_bio_endio()
128 bio_advance(bio, nbytes); in req_bio_endio()
131 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) in req_bio_endio()
132 bio_endio(bio, error); in req_bio_endio()
147 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
737 static void blk_queue_bio(struct request_queue *q, struct bio *bio);
929 static bool blk_rq_should_init_elevator(struct bio *bio) in blk_rq_should_init_elevator() argument
[all …]
Dblk-throttle.c323 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, in throtl_qnode_add_bio() argument
326 bio_list_add(&qn->bios, bio); in throtl_qnode_add_bio()
337 static struct bio *throtl_peek_queued(struct list_head *queued) in throtl_peek_queued()
340 struct bio *bio; in throtl_peek_queued() local
345 bio = bio_list_peek(&qn->bios); in throtl_peek_queued()
346 WARN_ON_ONCE(!bio); in throtl_peek_queued()
347 return bio; in throtl_peek_queued()
364 static struct bio *throtl_pop_queued(struct list_head *queued, in throtl_pop_queued()
368 struct bio *bio; in throtl_pop_queued() local
373 bio = bio_list_pop(&qn->bios); in throtl_pop_queued()
[all …]
Dblk.h58 void init_request_from_bio(struct request *req, struct bio *bio);
60 struct bio *bio);
62 struct bio *bio);
77 struct bio *bio);
79 struct bio *bio);
80 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
182 struct bio *bio);
184 struct bio *bio);
191 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
192 int blk_try_merge(struct request *rq, struct bio *bio);
[all …]
Dblk-integrity.c44 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) in blk_rq_count_integrity_sg() argument
52 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_count_integrity_sg()
89 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, in blk_rq_map_integrity_sg() argument
98 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_map_integrity_sg()
198 if (bio_integrity(req->bio)->bip_flags != in blk_integrity_merge_rq()
199 bio_integrity(next->bio)->bip_flags) in blk_integrity_merge_rq()
211 struct bio *bio) in blk_integrity_merge_bio() argument
214 struct bio *next = bio->bi_next; in blk_integrity_merge_bio()
216 if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL) in blk_integrity_merge_bio()
219 if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL) in blk_integrity_merge_bio()
[all …]
Dblk-cgroup.h190 static inline struct blkcg *bio_blkcg(struct bio *bio) in bio_blkcg() argument
192 if (bio && bio->bi_css) in bio_blkcg()
193 return css_to_blkcg(bio->bi_css); in bio_blkcg()
325 struct bio *bio) in blk_get_rl() argument
332 blkcg = bio_blkcg(bio); in blk_get_rl()
584 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; } in bio_blkcg() argument
594 struct bio *bio) { return &q->root_rl; } in blk_get_rl() argument
Dblk-mq.c694 struct blk_mq_ctx *ctx, struct bio *bio) in blk_mq_attempt_merge() argument
705 if (!blk_rq_merge_ok(rq, bio)) in blk_mq_attempt_merge()
708 el_ret = blk_try_merge(rq, bio); in blk_mq_attempt_merge()
710 if (bio_attempt_back_merge(q, rq, bio)) { in blk_mq_attempt_merge()
716 if (bio_attempt_front_merge(q, rq, bio)) { in blk_mq_attempt_merge()
1150 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) in blk_mq_bio_to_request() argument
1152 init_request_from_bio(rq, bio); in blk_mq_bio_to_request()
1166 struct request *rq, struct bio *bio) in blk_mq_merge_queue_io() argument
1169 blk_mq_bio_to_request(rq, bio); in blk_mq_merge_queue_io()
1179 if (!blk_mq_attempt_merge(q, ctx, bio)) { in blk_mq_merge_queue_io()
[all …]
Delevator.c56 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) in elv_iosched_allow_merge() argument
62 return e->type->ops.elevator_allow_merge_fn(q, rq, bio); in elv_iosched_allow_merge()
70 bool elv_rq_merge_ok(struct request *rq, struct bio *bio) in elv_rq_merge_ok() argument
72 if (!blk_rq_merge_ok(rq, bio)) in elv_rq_merge_ok()
75 if (!elv_iosched_allow_merge(rq, bio)) in elv_rq_merge_ok()
411 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) in elv_merge() argument
429 if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) { in elv_merge()
430 ret = blk_try_merge(q->last_merge, bio); in elv_merge()
443 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
444 if (__rq && elv_rq_merge_ok(__rq, bio)) { in elv_merge()
[all …]
Dblk-flush.c125 rq->bio = rq->biotail; in blk_flush_restore_request()
400 BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ in blk_insert_flush()
453 struct bio *bio; in blkdev_issue_flush() local
472 bio = bio_alloc(gfp_mask, 0); in blkdev_issue_flush()
473 bio->bi_bdev = bdev; in blkdev_issue_flush()
475 ret = submit_bio_wait(WRITE_FLUSH, bio); in blkdev_issue_flush()
483 *error_sector = bio->bi_iter.bi_sector; in blkdev_issue_flush()
485 bio_put(bio); in blkdev_issue_flush()
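blkdev_issue_flush() above submits an empty WRITE_FLUSH bio and, on failure, reports the sector via the out-parameter. A caller-side sketch; the function name is hypothetical:

```c
#include <linux/blkdev.h>

static int drain_write_cache(struct block_device *bdev)
{
	sector_t error_sector;

	/* Blocks until the device's volatile write cache is flushed. */
	return blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector);
}
```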
Dbsg.c84 struct bio *bio; member
85 struct bio *bidi_bio;
292 blk_rq_unmap_user(next_rq->bio); in bsg_map_hdr()
309 bd->name, rq, bc, bc->bio, uptodate); in bsg_rq_end_io()
334 bc->bio = rq->bio; in bsg_add_command()
336 bc->bidi_bio = rq->next_rq->bio; in bsg_add_command()
394 struct bio *bio, struct bio *bidi_bio) in blk_complete_sgv4_hdr_rq() argument
398 dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); in blk_complete_sgv4_hdr_rq()
441 blk_rq_unmap_user(bio); in blk_complete_sgv4_hdr_rq()
501 tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, in bsg_complete_all_commands()
[all …]
Ddeadline-iosched.c125 deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) in deadline_merge() argument
135 sector_t sector = bio_end_sector(bio); in deadline_merge()
137 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); in deadline_merge()
141 if (elv_rq_merge_ok(__rq, bio)) { in deadline_merge()
DMakefile5 obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
24 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
Dscsi_ioctl.c252 struct bio *bio) in blk_complete_sghdr_rq() argument
279 r = blk_rq_unmap_user(bio); in blk_complete_sghdr_rq()
295 struct bio *bio; in sg_io() local
356 bio = rq->bio; in sg_io()
372 ret = blk_complete_sghdr_rq(rq, hdr, bio); in sg_io()
Dcfq-iosched.c861 struct cfq_io_cq *cic, struct bio *bio,
898 static inline bool cfq_bio_sync(struct bio *bio) in cfq_bio_sync() argument
900 return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); in cfq_bio_sync()
2282 cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) in cfq_find_rq_fmerge() argument
2292 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); in cfq_find_rq_fmerge()
2294 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio)); in cfq_find_rq_fmerge()
2339 struct bio *bio) in cfq_merge() argument
2344 __rq = cfq_find_rq_fmerge(cfqd, bio); in cfq_merge()
2345 if (__rq && elv_rq_merge_ok(__rq, bio)) { in cfq_merge()
2364 struct bio *bio) in cfq_bio_merged() argument
[all …]
Dbsg-lib.c132 if (req->bio) { in bsg_create_job()
137 if (rsp && rsp->bio) { in bsg_create_job()
DKconfig92 bool "Block layer bio throttling support"
96 Block layer bio throttling support. It can be used to limit
/linux-4.1.27/drivers/md/bcache/
Dio.c14 static unsigned bch_bio_max_sectors(struct bio *bio) in bch_bio_max_sectors() argument
16 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bch_bio_max_sectors()
21 if (bio->bi_rw & REQ_DISCARD) in bch_bio_max_sectors()
22 return min(bio_sectors(bio), q->limits.max_discard_sectors); in bch_bio_max_sectors()
24 bio_for_each_segment(bv, bio, iter) { in bch_bio_max_sectors()
26 .bi_bdev = bio->bi_bdev, in bch_bio_max_sectors()
27 .bi_sector = bio->bi_iter.bi_sector, in bch_bio_max_sectors()
29 .bi_rw = bio->bi_rw, in bch_bio_max_sectors()
47 ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9); in bch_bio_max_sectors()
56 s->bio->bi_end_io = s->bi_end_io; in bch_bio_submit_split_done()
[all …]
Drequest.c28 static unsigned cache_mode(struct cached_dev *dc, struct bio *bio) in cache_mode() argument
33 static bool verify(struct cached_dev *dc, struct bio *bio) in verify() argument
38 static void bio_csum(struct bio *bio, struct bkey *k) in bio_csum() argument
44 bio_for_each_segment(bv, bio, iter) { in bio_csum()
118 struct bio *bio = op->bio; in bch_data_invalidate() local
121 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate()
123 while (bio_sectors(bio)) { in bch_data_invalidate()
124 unsigned sectors = min(bio_sectors(bio), in bch_data_invalidate()
130 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate()
131 bio->bi_iter.bi_size -= sectors << 9; in bch_data_invalidate()
[all …]
Dmovinggc.c18 struct bbio bio; member
46 struct bio *bio = &io->bio.bio; in write_moving_finish() local
50 bio_for_each_segment_all(bv, bio, i) in write_moving_finish()
63 static void read_moving_endio(struct bio *bio, int error) in read_moving_endio() argument
65 struct bbio *b = container_of(bio, struct bbio, bio); in read_moving_endio()
66 struct moving_io *io = container_of(bio->bi_private, in read_moving_endio()
76 bch_bbio_endio(io->op.c, bio, error, "reading data to move"); in read_moving_endio()
81 struct bio *bio = &io->bio.bio; in moving_init() local
83 bio_init(bio); in moving_init()
84 bio_get(bio); in moving_init()
[all …]
Dwriteback.c102 struct bio bio; member
108 struct bio *bio = &io->bio; in dirty_init() local
110 bio_init(bio); in dirty_init()
112 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); in dirty_init()
114 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; in dirty_init()
115 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); in dirty_init()
116 bio->bi_private = w; in dirty_init()
117 bio->bi_io_vec = bio->bi_inline_vecs; in dirty_init()
118 bch_bio_map(bio, NULL); in dirty_init()
130 struct keybuf_key *w = io->bio.bi_private; in write_dirty_finish()
[all …]
Djournal.c27 static void journal_read_endio(struct bio *bio, int error) in journal_read_endio() argument
29 struct closure *cl = bio->bi_private; in journal_read_endio()
37 struct bio *bio = &ja->bio; in journal_read_bucket() local
54 bio_reset(bio); in journal_read_bucket()
55 bio->bi_iter.bi_sector = bucket + offset; in journal_read_bucket()
56 bio->bi_bdev = ca->bdev; in journal_read_bucket()
57 bio->bi_rw = READ; in journal_read_bucket()
58 bio->bi_iter.bi_size = len << 9; in journal_read_bucket()
60 bio->bi_end_io = journal_read_endio; in journal_read_bucket()
61 bio->bi_private = &cl; in journal_read_bucket()
[all …]
Ddebug.c34 struct bio *bio; in bch_btree_verify() local
51 bio = bch_bbio_alloc(b->c); in bch_btree_verify()
52 bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; in bch_btree_verify()
53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in bch_btree_verify()
54 bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; in bch_btree_verify()
55 bch_bio_map(bio, sorted); in bch_btree_verify()
57 submit_bio_wait(REQ_META|READ_SYNC, bio); in bch_btree_verify()
58 bch_bbio_free(bio, b->c); in bch_btree_verify()
105 void bch_data_verify(struct cached_dev *dc, struct bio *bio) in bch_data_verify() argument
108 struct bio *check; in bch_data_verify()
[all …]
Ddebug.h4 struct bio;
11 void bch_data_verify(struct cached_dev *, struct bio *);
20 static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} in bch_data_verify() argument
Dwriteback.h42 static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, in should_writeback() argument
53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, in should_writeback()
54 bio_sectors(bio))) in should_writeback()
60 return bio->bi_rw & REQ_SYNC || in should_writeback()
Dbcache.h254 struct bio *bio; member
289 struct bio *, unsigned);
311 struct bio sb_bio;
398 struct bio sb_bio;
694 struct bio bio; member
870 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
872 void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
873 void bch_bbio_free(struct bio *, struct cache_set *);
874 struct bio *bch_bbio_alloc(struct cache_set *);
876 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
[all …]
Dsuper.c224 static void write_bdev_super_endio(struct bio *bio, int error) in write_bdev_super_endio() argument
226 struct cached_dev *dc = bio->bi_private; in write_bdev_super_endio()
232 static void __write_super(struct cache_sb *sb, struct bio *bio) in __write_super() argument
234 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); in __write_super()
237 bio->bi_iter.bi_sector = SB_SECTOR; in __write_super()
238 bio->bi_rw = REQ_SYNC|REQ_META; in __write_super()
239 bio->bi_iter.bi_size = SB_SIZE; in __write_super()
240 bch_bio_map(bio, NULL); in __write_super()
264 submit_bio(REQ_WRITE, bio); in __write_super()
277 struct bio *bio = &dc->sb_bio; in bch_write_bdev_super() local
[all …]
Dutil.c225 void bch_bio_map(struct bio *bio, void *base) in bch_bio_map() argument
227 size_t size = bio->bi_iter.bi_size; in bch_bio_map()
228 struct bio_vec *bv = bio->bi_io_vec; in bch_bio_map()
230 BUG_ON(!bio->bi_iter.bi_size); in bch_bio_map()
231 BUG_ON(bio->bi_vcnt); in bch_bio_map()
236 for (; size; bio->bi_vcnt++, bv++) { in bch_bio_map()
Djournal.h148 struct bio discard_bio;
152 struct bio bio; member
Drequest.h7 struct bio *bio; member
Dbtree.c281 static void btree_node_read_endio(struct bio *bio, int error) in btree_node_read_endio() argument
283 struct closure *cl = bio->bi_private; in btree_node_read_endio()
291 struct bio *bio; in bch_btree_node_read() local
297 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
298 bio->bi_rw = REQ_META|READ_SYNC; in bch_btree_node_read()
299 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read()
300 bio->bi_end_io = btree_node_read_endio; in bch_btree_node_read()
301 bio->bi_private = &cl; in bch_btree_node_read()
303 bch_bio_map(bio, b->keys.set[0].data); in bch_btree_node_read()
305 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
[all …]
Dbtree.h146 struct bio *bio; member
Dutil.h572 void bch_bio_map(struct bio *bio, void *base);
579 #define closure_bio_submit(bio, cl, dev) \ argument
582 bch_generic_make_request(bio, &(dev)->bio_split_hook); \
/linux-4.1.27/kernel/power/
Dblock_io.c29 struct page *page, struct bio **bio_chain) in submit()
32 struct bio *bio; in submit() local
34 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); in submit()
35 bio->bi_iter.bi_sector = sector; in submit()
36 bio->bi_bdev = bdev; in submit()
37 bio->bi_end_io = end_swap_bio_read; in submit()
39 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in submit()
42 bio_put(bio); in submit()
47 bio_get(bio); in submit()
50 submit_bio(bio_rw, bio); in submit()
[all …]
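The hibernation submit() above is the canonical bio_alloc()/bio_add_page()/submit_bio() sequence with chained completion. A synchronous single-page variant as a sketch, assuming the 4.1 submit_bio_wait(rw, bio) signature used elsewhere in this tree; the function name is invented:

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

static int read_one_page(struct block_device *bdev, sector_t sector,
			 struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}

	ret = submit_bio_wait(READ, bio);	/* sleeps until completion */
	bio_put(bio);
	return ret;
}
```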
Dswap.c277 static int write_page(void *buf, sector_t offset, struct bio **bio_chain) in write_page()
351 struct bio **bio_chain) in swap_write_page()
448 struct bio *bio; in save_image() local
458 bio = NULL; in save_image()
464 ret = swap_write_page(handle, data_of(*snapshot), &bio); in save_image()
472 err2 = hib_wait_on_bio_chain(&bio); in save_image()
583 struct bio *bio; in save_image_lzo() local
677 bio = NULL; in save_image_lzo()
751 ret = swap_write_page(handle, page, &bio); in save_image_lzo()
762 err2 = hib_wait_on_bio_chain(&bio); in save_image_lzo()
[all …]
Dpower.h170 struct bio **bio_chain);
172 struct bio **bio_chain);
173 extern int hib_wait_on_bio_chain(struct bio **bio_chain);
/linux-4.1.27/fs/logfs/
Ddev_bdev.c19 struct bio bio; in sync_request() local
22 bio_init(&bio); in sync_request()
23 bio.bi_max_vecs = 1; in sync_request()
24 bio.bi_io_vec = &bio_vec; in sync_request()
28 bio.bi_vcnt = 1; in sync_request()
29 bio.bi_bdev = bdev; in sync_request()
30 bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); in sync_request()
31 bio.bi_iter.bi_size = PAGE_SIZE; in sync_request()
33 return submit_bio_wait(rw, &bio); in sync_request()
56 static void writeseg_end_io(struct bio *bio, int err) in writeseg_end_io() argument
[all …]
/linux-4.1.27/fs/
Dmpage.c45 static void mpage_end_io(struct bio *bio, int err) in mpage_end_io() argument
50 bio_for_each_segment_all(bv, bio, i) { in mpage_end_io()
52 page_endio(page, bio_data_dir(bio), err); in mpage_end_io()
55 bio_put(bio); in mpage_end_io()
58 static struct bio *mpage_bio_submit(int rw, struct bio *bio) in mpage_bio_submit() argument
60 bio->bi_end_io = mpage_end_io; in mpage_bio_submit()
61 guard_bio_eod(rw, bio); in mpage_bio_submit()
62 submit_bio(rw, bio); in mpage_bio_submit()
66 static struct bio *
71 struct bio *bio; in mpage_alloc() local
[all …]
Ddirect-io.c62 struct bio *bio; /* bio under assembly */ member
125 struct bio *bio_list; /* singly linked via bi_private */
283 static int dio_bio_complete(struct dio *dio, struct bio *bio);
288 static void dio_bio_end_aio(struct bio *bio, int error) in dio_bio_end_aio() argument
290 struct dio *dio = bio->bi_private; in dio_bio_end_aio()
295 dio_bio_complete(dio, bio); in dio_bio_end_aio()
321 static void dio_bio_end_io(struct bio *bio, int error) in dio_bio_end_io() argument
323 struct dio *dio = bio->bi_private; in dio_bio_end_io()
327 bio->bi_private = dio->bio_list; in dio_bio_end_io()
328 dio->bio_list = bio; in dio_bio_end_io()
[all …]
Dbuffer.c2937 static void end_bio_bh_io_sync(struct bio *bio, int err) in end_bio_bh_io_sync() argument
2939 struct buffer_head *bh = bio->bi_private; in end_bio_bh_io_sync()
2942 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); in end_bio_bh_io_sync()
2945 if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags))) in end_bio_bh_io_sync()
2948 bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); in end_bio_bh_io_sync()
2949 bio_put(bio); in end_bio_bh_io_sync()
2964 void guard_bio_eod(int rw, struct bio *bio) in guard_bio_eod() argument
2967 struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; in guard_bio_eod()
2970 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; in guard_bio_eod()
2979 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
[all …]
Dinternal.h41 extern void guard_bio_eod(int rw, struct bio *bio);
/linux-4.1.27/fs/ext4/
Dreadpage.c58 struct bio *bio = ctx->bio; in completion_pages() local
62 bio_for_each_segment_all(bv, bio, i) { in completion_pages()
74 bio_put(bio); in completion_pages()
80 static inline bool ext4_bio_encrypted(struct bio *bio) in ext4_bio_encrypted() argument
83 return unlikely(bio->bi_private != NULL); in ext4_bio_encrypted()
101 static void mpage_end_io(struct bio *bio, int err) in mpage_end_io() argument
106 if (ext4_bio_encrypted(bio)) { in mpage_end_io()
107 struct ext4_crypto_ctx *ctx = bio->bi_private; in mpage_end_io()
113 ctx->bio = bio; in mpage_end_io()
118 bio_for_each_segment_all(bv, bio, i) { in mpage_end_io()
[all …]
Dpage-io.c61 static void ext4_finish_bio(struct bio *bio) in ext4_finish_bio() argument
64 int error = !test_bit(BIO_UPTODATE, &bio->bi_flags); in ext4_finish_bio()
67 bio_for_each_segment_all(bvec, bio, i) { in ext4_finish_bio()
127 struct bio *bio, *next_bio; in ext4_release_io_end() local
136 for (bio = io_end->bio; bio; bio = next_bio) { in ext4_release_io_end()
137 next_bio = bio->bi_private; in ext4_release_io_end()
138 ext4_finish_bio(bio); in ext4_release_io_end()
139 bio_put(bio); in ext4_release_io_end()
313 static void ext4_end_bio(struct bio *bio, int error) in ext4_end_bio() argument
315 ext4_io_end_t *io_end = bio->bi_private; in ext4_end_bio()
[all …]
Dcrypto.c484 struct bio *bio; in ext4_encrypted_zeroout() local
518 bio = bio_alloc(GFP_KERNEL, 1); in ext4_encrypted_zeroout()
519 if (!bio) { in ext4_encrypted_zeroout()
523 bio->bi_bdev = inode->i_sb->s_bdev; in ext4_encrypted_zeroout()
524 bio->bi_iter.bi_sector = pblk; in ext4_encrypted_zeroout()
525 err = bio_add_page(bio, ciphertext_page, in ext4_encrypted_zeroout()
528 bio_put(bio); in ext4_encrypted_zeroout()
531 err = submit_bio_wait(WRITE, bio); in ext4_encrypted_zeroout()
Dext4_crypto.h82 struct bio *bio; /* The bio for this context */ member
/linux-4.1.27/mm/
Dpage_io.c27 static struct bio *get_swap_bio(gfp_t gfp_flags, in get_swap_bio()
30 struct bio *bio; in get_swap_bio() local
32 bio = bio_alloc(gfp_flags, 1); in get_swap_bio()
33 if (bio) { in get_swap_bio()
34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); in get_swap_bio()
35 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; in get_swap_bio()
36 bio->bi_io_vec[0].bv_page = page; in get_swap_bio()
37 bio->bi_io_vec[0].bv_len = PAGE_SIZE; in get_swap_bio()
38 bio->bi_io_vec[0].bv_offset = 0; in get_swap_bio()
39 bio->bi_vcnt = 1; in get_swap_bio()
[all …]
/linux-4.1.27/include/trace/events/
Dblock.h258 TP_PROTO(struct request_queue *q, struct bio *bio),
260 TP_ARGS(q, bio),
271 __entry->dev = bio->bi_bdev ?
272 bio->bi_bdev->bd_dev : 0;
273 __entry->sector = bio->bi_iter.bi_sector;
274 __entry->nr_sector = bio_sectors(bio);
275 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
296 TP_PROTO(struct request_queue *q, struct bio *bio, int error),
298 TP_ARGS(q, bio, error),
309 __entry->dev = bio->bi_bdev->bd_dev;
[all …]
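The event class above records device, sector, length, and rwbs string for each bio. Firing one of its instances, block_bio_queue, looks like this sketch (the wrapper name is hypothetical; the tracepoint compiles to a no-op unless enabled):

```c
#include <trace/events/block.h>

static void note_queued_bio(struct request_queue *q, struct bio *bio)
{
	trace_block_bio_queue(q, bio);
}
```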
Dbcache.h10 TP_PROTO(struct bcache_device *d, struct bio *bio),
11 TP_ARGS(d, bio),
24 __entry->dev = bio->bi_bdev->bd_dev;
27 __entry->sector = bio->bi_iter.bi_sector;
28 __entry->orig_sector = bio->bi_iter.bi_sector - 16;
29 __entry->nr_sector = bio->bi_iter.bi_size >> 9;
30 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
80 TP_PROTO(struct bcache_device *d, struct bio *bio),
81 TP_ARGS(d, bio)
85 TP_PROTO(struct bcache_device *d, struct bio *bio),
[all …]
Df2fs.h741 struct bio *bio),
743 TP_ARGS(sb, fio, bio),
757 __entry->sector = bio->bi_iter.bi_sector;
758 __entry->size = bio->bi_iter.bi_size;
772 struct bio *bio),
774 TP_ARGS(sb, fio, bio),
776 TP_CONDITION(bio)
782 struct bio *bio),
784 TP_ARGS(sb, fio, bio),
786 TP_CONDITION(bio)
/linux-4.1.27/drivers/md/
Ddm-raid1.c119 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) in queue_bio() argument
128 bio_list_add(bl, bio); in queue_bio()
138 struct bio *bio; in dispatch_bios() local
140 while ((bio = bio_list_pop(bio_list))) in dispatch_bios()
141 queue_bio(ms, bio, WRITE); in dispatch_bios()
161 static struct mirror *bio_get_m(struct bio *bio) in bio_get_m() argument
163 return (struct mirror *) bio->bi_next; in bio_get_m()
166 static void bio_set_m(struct bio *bio, struct mirror *m) in bio_set_m() argument
168 bio->bi_next = (struct bio *) m; in bio_set_m()
429 static int mirror_available(struct mirror_set *ms, struct bio *bio) in mirror_available() argument
[all …]
Ddm-thin.c209 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
327 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
339 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
475 struct bio *bio; in error_bio_list() local
477 while ((bio = bio_list_pop(bios))) in error_bio_list()
478 bio_endio(bio, error); in error_bio_list()
550 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) in get_bio_block() argument
553 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
563 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) in remap() argument
566 sector_t bi_sector = bio->bi_iter.bi_sector; in remap()
[all …]
Ddm-log-writes.c149 static void log_end_io(struct bio *bio, int err) in log_end_io() argument
151 struct log_writes_c *lc = bio->bi_private; in log_end_io()
164 bio_for_each_segment_all(bvec, bio, i) in log_end_io()
168 bio_put(bio); in log_end_io()
193 struct bio *bio; in write_metadata() local
198 bio = bio_alloc(GFP_KERNEL, 1); in write_metadata()
199 if (!bio) { in write_metadata()
203 bio->bi_iter.bi_size = 0; in write_metadata()
204 bio->bi_iter.bi_sector = sector; in write_metadata()
205 bio->bi_bdev = lc->logdev->bdev; in write_metadata()
[all …]
Ddm-bio-record.h26 static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) in dm_bio_record() argument
28 bd->bi_bdev = bio->bi_bdev; in dm_bio_record()
29 bd->bi_flags = bio->bi_flags; in dm_bio_record()
30 bd->bi_iter = bio->bi_iter; in dm_bio_record()
33 static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) in dm_bio_restore() argument
35 bio->bi_bdev = bd->bi_bdev; in dm_bio_restore()
36 bio->bi_flags = bd->bi_flags; in dm_bio_restore()
37 bio->bi_iter = bd->bi_iter; in dm_bio_restore()
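dm_bio_record()/dm_bio_restore() above snapshot exactly the three fields a remap mutates (bi_bdev, bi_flags, bi_iter), so a failed bio can be retried against another device. A compressed sketch of that lifecycle, assuming code living under drivers/md/ so the local header resolves; the function name is invented:

```c
#include "dm-bio-record.h"

static void remap_bio(struct bio *bio, struct block_device *new_bdev,
		      struct dm_bio_details *bd)
{
	dm_bio_record(bd, bio);		/* save bi_bdev, bi_flags, bi_iter */
	bio->bi_bdev = new_bdev;	/* ...submit; then on error: */
	dm_bio_restore(bd, bio);	/* roll back before retrying elsewhere */
}
```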
Draid1.c54 #define IO_BLOCKED ((struct bio *)1)
59 #define IO_MADE_GOOD ((struct bio *)2)
61 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) argument
99 struct bio *bio; in r1buf_pool_alloc() local
111 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r1buf_pool_alloc()
112 if (!bio) in r1buf_pool_alloc()
114 r1_bio->bios[j] = bio; in r1buf_pool_alloc()
127 bio = r1_bio->bios[j]; in r1buf_pool_alloc()
128 bio->bi_vcnt = RESYNC_PAGES; in r1buf_pool_alloc()
130 if (bio_alloc_pages(bio, gfp_flags)) in r1buf_pool_alloc()
[all …]
Ddm-delay.c64 static void flush_bios(struct bio *bio) in flush_bios() argument
66 struct bio *n; in flush_bios()
68 while (bio) { in flush_bios()
69 n = bio->bi_next; in flush_bios()
70 bio->bi_next = NULL; in flush_bios()
71 generic_make_request(bio); in flush_bios()
72 bio = n; in flush_bios()
76 static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all) in flush_delayed_bios()
86 struct bio *bio = dm_bio_from_per_bio_data(delayed, in flush_delayed_bios() local
89 bio_list_add(&flush_bios, bio); in flush_delayed_bios()
[all …]
Dmultipath.c77 struct bio *bio = mp_bh->master_bio; in multipath_end_bh_io() local
80 bio_endio(bio, err); in multipath_end_bh_io()
84 static void multipath_end_request(struct bio *bio, int error) in multipath_end_request() argument
86 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); in multipath_end_request()
87 struct multipath_bh *mp_bh = bio->bi_private; in multipath_end_request()
93 else if (!(bio->bi_rw & REQ_RAHEAD)) { in multipath_end_request()
101 (unsigned long long)bio->bi_iter.bi_sector); in multipath_end_request()
108 static void multipath_make_request(struct mddev *mddev, struct bio * bio) in multipath_make_request() argument
114 if (unlikely(bio->bi_rw & REQ_FLUSH)) { in multipath_make_request()
115 md_flush_request(mddev, bio); in multipath_make_request()
[all …]
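Note the pattern at the top of multipath_make_request() above, repeated by linear and raid0 below: every md personality hands REQ_FLUSH bios to the core first. A sketch of that preamble, assuming a file under drivers/md/; the personality name is hypothetical:

```c
#include "md.h"

static void my_make_request(struct mddev *mddev, struct bio *bio)
{
	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		/* md serializes flushes for the whole array. */
		md_flush_request(mddev, bio);
		return;
	}
	/* ...remap bio and generic_make_request() it... */
}
```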
Draid10.c83 #define IO_BLOCKED ((struct bio *)1)
88 #define IO_MADE_GOOD ((struct bio *)2)
90 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) argument
104 static void end_reshape_write(struct bio *bio, int error);
142 struct bio *bio; in r10buf_pool_alloc() local
160 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r10buf_pool_alloc()
161 if (!bio) in r10buf_pool_alloc()
163 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
166 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); in r10buf_pool_alloc()
167 if (!bio) in r10buf_pool_alloc()
[all …]
Ddm-flakey.c18 #define all_corrupt_bio_flags_match(bio, fc) \ argument
19 (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
245 static void flakey_map_bio(struct dm_target *ti, struct bio *bio) in flakey_map_bio() argument
249 bio->bi_bdev = fc->dev->bdev; in flakey_map_bio()
250 if (bio_sectors(bio)) in flakey_map_bio()
251 bio->bi_iter.bi_sector = in flakey_map_bio()
252 flakey_map_sector(ti, bio->bi_iter.bi_sector); in flakey_map_bio()
255 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) in corrupt_bio_data() argument
257 unsigned bio_bytes = bio_cur_bytes(bio); in corrupt_bio_data()
258 char *data = bio_data(bio); in corrupt_bio_data()
[all …]
Ddm-cache-target.c75 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio, in dm_hook_bio() argument
78 h->bi_end_io = bio->bi_end_io; in dm_hook_bio()
79 h->bi_private = bio->bi_private; in dm_hook_bio()
81 bio->bi_end_io = bi_end_io; in dm_hook_bio()
82 bio->bi_private = bi_private; in dm_hook_bio()
85 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) in dm_unhook_bio() argument
87 bio->bi_end_io = h->bi_end_io; in dm_unhook_bio()
88 bio->bi_private = h->bi_private; in dm_unhook_bio()
94 atomic_inc(&bio->bi_remaining); in dm_unhook_bio()
480 struct bio *bio, struct dm_bio_prison_cell *cell_prealloc, in bio_detain_range() argument
[all …]
Ddm-snap.c201 struct bio *full_bio;
218 static void init_tracked_chunk(struct bio *bio) in init_tracked_chunk() argument
220 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in init_tracked_chunk()
224 static bool is_bio_tracked(struct bio *bio) in is_bio_tracked() argument
226 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in is_bio_tracked()
230 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) in track_chunk() argument
232 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in track_chunk()
242 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) in stop_tracking_chunk() argument
244 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); in stop_tracking_chunk()
838 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) in __release_queued_bios_after_merge()
[all …]
Dfaulty.c73 static void faulty_fail(struct bio *bio, int error) in faulty_fail() argument
75 struct bio *b = bio->bi_private; in faulty_fail()
77 b->bi_iter.bi_size = bio->bi_iter.bi_size; in faulty_fail()
78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector; in faulty_fail()
80 bio_put(bio); in faulty_fail()
173 static void make_request(struct mddev *mddev, struct bio *bio) in make_request() argument
178 if (bio_data_dir(bio) == WRITE) { in make_request()
184 bio_endio(bio, -EIO); in make_request()
188 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request()
189 bio_end_sector(bio), WRITE)) in make_request()
[all …]
Ddm-io.c90 static void store_io_and_region_in_bio(struct bio *bio, struct io *io, in store_io_and_region_in_bio() argument
98 bio->bi_private = (void *)((unsigned long)io | region); in store_io_and_region_in_bio()
101 static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, in retrieve_io_and_region_from_bio() argument
104 unsigned long val = (unsigned long)bio->bi_private; in retrieve_io_and_region_from_bio()
137 static void endio(struct bio *bio, int error) in endio() argument
142 if (error && bio_data_dir(bio) == READ) in endio()
143 zero_fill_bio(bio); in endio()
148 retrieve_io_and_region_from_bio(bio, &io, &region); in endio()
150 bio_put(bio); in endio()
219 static void bio_dp_init(struct dpages *dp, struct bio *bio) in bio_dp_init() argument
[all …]
Ddm-stripe.c259 static int stripe_map_range(struct stripe_c *sc, struct bio *bio, in stripe_map_range() argument
264 stripe_map_range_sector(sc, bio->bi_iter.bi_sector, in stripe_map_range()
266 stripe_map_range_sector(sc, bio_end_sector(bio), in stripe_map_range()
269 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; in stripe_map_range()
270 bio->bi_iter.bi_sector = begin + in stripe_map_range()
272 bio->bi_iter.bi_size = to_bytes(end - begin); in stripe_map_range()
276 bio_endio(bio, 0); in stripe_map_range()
281 static int stripe_map(struct dm_target *ti, struct bio *bio) in stripe_map() argument
287 if (bio->bi_rw & REQ_FLUSH) { in stripe_map()
288 target_bio_nr = dm_bio_get_target_bio_nr(bio); in stripe_map()
[all …]
Dlinear.c259 static void linear_make_request(struct mddev *mddev, struct bio *bio) in linear_make_request() argument
263 struct bio *split; in linear_make_request()
266 if (unlikely(bio->bi_rw & REQ_FLUSH)) { in linear_make_request()
267 md_flush_request(mddev, bio); in linear_make_request()
272 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); in linear_make_request()
276 bio->bi_bdev = tmp_dev->rdev->bdev; in linear_make_request()
278 if (unlikely(bio->bi_iter.bi_sector >= end_sector || in linear_make_request()
279 bio->bi_iter.bi_sector < start_sector)) in linear_make_request()
282 if (unlikely(bio_end_sector(bio) > end_sector)) { in linear_make_request()
286 split = bio_split(bio, end_sector - in linear_make_request()
[all …]
Ddm.c72 struct bio *bio; member
100 struct bio *orig;
102 struct bio clone;
213 struct bio flush_bio;
643 struct bio *bio = io->bio; in start_io_acct() local
645 int rw = bio_data_dir(bio); in start_io_acct()
656 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in start_io_acct()
657 bio_sectors(bio), false, 0, &io->stats_aux); in start_io_acct()
663 struct bio *bio = io->bio; in end_io_acct() local
666 int rw = bio_data_dir(bio); in end_io_acct()
[all …]
Ddm-zero.c36 static int zero_map(struct dm_target *ti, struct bio *bio) in zero_map() argument
38 switch(bio_rw(bio)) { in zero_map()
40 zero_fill_bio(bio); in zero_map()
50 bio_endio(bio, 0); in zero_map()
Ddm-linear.c82 static void linear_map_bio(struct dm_target *ti, struct bio *bio) in linear_map_bio() argument
86 bio->bi_bdev = lc->dev->bdev; in linear_map_bio()
87 if (bio_sectors(bio)) in linear_map_bio()
88 bio->bi_iter.bi_sector = in linear_map_bio()
89 linear_map_sector(ti, bio->bi_iter.bi_sector); in linear_map_bio()
92 static int linear_map(struct dm_target *ti, struct bio *bio) in linear_map() argument
94 linear_map_bio(ti, bio); in linear_map()
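dm-zero and dm-linear above are the two smallest targets: one completes bios itself and returns DM_MAPIO_SUBMITTED, the other remaps and returns DM_MAPIO_REMAPPED. A pass-through sketch in the dm-linear style; the pt_* names are invented and ti->private is assumed to hold a struct dm_dev set up by the constructor:

```c
#include <linux/device-mapper.h>

static int pt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_dev *dev = ti->private;

	bio->bi_bdev = dev->bdev;
	if (bio_sectors(bio))
		bio->bi_iter.bi_sector =
			dm_target_offset(ti, bio->bi_iter.bi_sector);

	return DM_MAPIO_REMAPPED;	/* core DM resubmits the remapped bio */
}
```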
Ddm-verity.c354 struct bio *bio = dm_bio_from_per_bio_data(io, in verity_verify_io() local
409 struct bio_vec bv = bio_iter_iovec(bio, io->iter); in verity_verify_io()
423 bio_advance_iter(bio, &io->iter, len); in verity_verify_io()
457 struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size); in verity_finish_io() local
459 bio->bi_end_io = io->orig_bi_end_io; in verity_finish_io()
460 bio->bi_private = io->orig_bi_private; in verity_finish_io()
462 bio_endio_nodec(bio, error); in verity_finish_io()
472 static void verity_end_io(struct bio *bio, int error) in verity_end_io() argument
474 struct dm_verity_io *io = bio->bi_private; in verity_end_io()
546 static int verity_map(struct dm_target *ti, struct bio *bio) in verity_map() argument
[all …]
Ddm-region-hash.c127 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) in dm_rh_bio_to_region() argument
129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - in dm_rh_bio_to_region()
395 void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) in dm_rh_mark_nosync() argument
400 region_t region = dm_rh_bio_to_region(rh, bio); in dm_rh_mark_nosync()
403 if (bio->bi_rw & REQ_FLUSH) { in dm_rh_mark_nosync()
408 if (bio->bi_rw & REQ_DISCARD) in dm_rh_mark_nosync()
528 struct bio *bio; in dm_rh_inc_pending() local
530 for (bio = bios->head; bio; bio = bio->bi_next) { in dm_rh_inc_pending()
531 if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)) in dm_rh_inc_pending()
533 rh_inc(rh, dm_rh_bio_to_region(rh, bio)); in dm_rh_inc_pending()
[all …]
Draid10.h103 struct bio *master_bio;
119 struct bio *bio; member
121 struct bio *repl_bio; /* used for resync and
Dmultipath.h26 struct bio *master_bio;
27 struct bio bio; member
Draid0.c497 unsigned int chunk_sects, struct bio *bio) in is_io_in_chunk_boundary() argument
501 ((bio->bi_iter.bi_sector & (chunk_sects-1)) in is_io_in_chunk_boundary()
502 + bio_sectors(bio)); in is_io_in_chunk_boundary()
504 sector_t sector = bio->bi_iter.bi_sector; in is_io_in_chunk_boundary()
506 + bio_sectors(bio)); in is_io_in_chunk_boundary()
510 static void raid0_make_request(struct mddev *mddev, struct bio *bio) in raid0_make_request() argument
514 struct bio *split; in raid0_make_request()
516 if (unlikely(bio->bi_rw & REQ_FLUSH)) { in raid0_make_request()
517 md_flush_request(mddev, bio); in raid0_make_request()
522 sector_t sector = bio->bi_iter.bi_sector; in raid0_make_request()
[all …]
Ddm-bufio.c150 struct bio bio; member
548 b->bio.bi_end_io(&b->bio, error ? -EIO : 0); in dmio_complete()
575 b->bio.bi_end_io = end_io; in use_dmio()
579 end_io(&b->bio, r); in use_dmio()
582 static void inline_endio(struct bio *bio, int error) in inline_endio() argument
584 bio_end_io_t *end_fn = bio->bi_private; in inline_endio()
590 bio_reset(bio); in inline_endio()
592 end_fn(bio, error); in inline_endio()
601 bio_init(&b->bio); in use_inline_bio()
602 b->bio.bi_io_vec = b->bio_vec; in use_inline_bio()
[all …]
Dmd.h455 struct bio *flush_bio;
498 void (*make_request)(struct mddev *mddev, struct bio *bio);
634 extern void md_write_start(struct mddev *mddev, struct bio *bi);
641 extern void md_flush_request(struct mddev *mddev, struct bio *bio);
666 extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
668 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
Ddm-bio-prison.c75 struct bio *holder, in __setup_new_cell()
109 struct bio *inmate, in __bio_detain()
146 struct bio *inmate, in bio_detain()
162 struct bio *inmate, in dm_bio_detain()
234 struct bio *bio; in dm_cell_error() local
239 while ((bio = bio_list_pop(&bios))) in dm_cell_error()
240 bio_endio(bio, error); in dm_cell_error()
Ddm-era-target.c1184 static dm_block_t get_block(struct era *era, struct bio *bio) in get_block() argument
1186 sector_t block_nr = bio->bi_iter.bi_sector; in get_block()
1196 static void remap_to_origin(struct era *era, struct bio *bio) in remap_to_origin() argument
1198 bio->bi_bdev = era->origin_dev->bdev; in remap_to_origin()
1230 struct bio *bio; in process_deferred_bios() local
1242 while ((bio = bio_list_pop(&deferred_bios))) { in process_deferred_bios()
1245 get_block(era, bio)); in process_deferred_bios()
1256 bio_list_add(&marked_bios, bio); in process_deferred_bios()
1266 while ((bio = bio_list_pop(&marked_bios))) in process_deferred_bios()
1267 bio_io_error(bio); in process_deferred_bios()
[all …]
Ddm-crypt.c41 struct bio *bio_in;
42 struct bio *bio_out;
55 struct bio *base_bio;
184 static void clone_init(struct dm_crypt_io *, struct bio *);
804 struct bio *bio_out, struct bio *bio_in, in crypt_convert_init()
900 struct ablkcipher_request *req, struct bio *base_bio) in crypt_free_req()
954 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
973 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) in crypt_alloc_buffer()
976 struct bio *clone; in crypt_alloc_buffer()
1023 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) in crypt_free_buffer_pages()
[all …]
Draid5.h241 struct bio req, rreq;
244 struct bio *toread, *read, *towrite, *written;
268 struct bio *return_bi;
469 struct bio *retry_read_aligned; /* currently retrying aligned bios */
470 struct bio *retry_read_aligned_list; /* aligned bios retry list */
Ddm-cache-policy-internal.h19 struct bio *bio, struct policy_locker *locker, in policy_map() argument
22 return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, locker, result); in policy_map()
Draid1.h133 struct bio *master_bio;
147 struct bio *bios[0];
Ddm-bio-prison.h45 struct bio *holder;
83 struct bio *inmate,
Ddm-cache-policy.h137 struct bio *bio, struct policy_locker *locker,
Ddm-target.c129 static int io_err_map(struct dm_target *tt, struct bio *bio) in io_err_map() argument
Ddm-switch.c319 static int switch_map(struct dm_target *ti, struct bio *bio) in switch_map() argument
322 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); in switch_map()
325 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; in switch_map()
326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; in switch_map()
Ddm-cache-policy-mq.c74 static void iot_update_stats(struct io_tracker *t, struct bio *bio) in iot_update_stats() argument
76 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) in iot_update_stats()
91 t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1); in iot_update_stats()
113 static void iot_examine_bio(struct io_tracker *t, struct bio *bio) in iot_examine_bio() argument
115 iot_update_stats(t, bio); in iot_examine_bio()
1029 struct bio *bio, struct policy_locker *locker, in mq_map() argument
1044 iot_examine_bio(&mq->tracker, bio); in mq_map()
1046 bio_data_dir(bio), locker, result); in mq_map()
Draid5.c138 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) in r5_next_bio() argument
140 int sectors = bio_sectors(bio); in r5_next_bio()
141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio()
142 return bio->bi_next; in r5_next_bio()
151 static inline int raid5_bi_processed_stripes(struct bio *bio) in raid5_bi_processed_stripes() argument
153 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; in raid5_bi_processed_stripes()
157 static inline int raid5_dec_bi_active_stripes(struct bio *bio) in raid5_dec_bi_active_stripes() argument
159 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; in raid5_dec_bi_active_stripes()
163 static inline void raid5_inc_bi_active_stripes(struct bio *bio) in raid5_inc_bi_active_stripes() argument
165 atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; in raid5_inc_bi_active_stripes()
[all …]
Dmd.c164 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, in bio_alloc_mddev()
167 struct bio *b; in bio_alloc_mddev()
179 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, in bio_clone_mddev() argument
183 return bio_clone(bio, gfp_mask); in bio_clone_mddev()
185 return bio_clone_bioset(bio, gfp_mask, mddev->bio_set); in bio_clone_mddev()
253 static void md_make_request(struct request_queue *q, struct bio *bio) in md_make_request() argument
255 const int rw = bio_data_dir(bio); in md_make_request()
262 bio_io_error(bio); in md_make_request()
266 bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS); in md_make_request()
291 sectors = bio_sectors(bio); in md_make_request()
[all …]
Ddm-cache-policy-cleaner.c174 struct bio *bio, struct policy_locker *locker, in wb_map() argument
DMakefile39 obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
/linux-4.1.27/fs/nfs/blocklayout/
Dblocklayout.c104 static struct bio *
105 bl_submit_bio(int rw, struct bio *bio) in bl_submit_bio() argument
107 if (bio) { in bl_submit_bio()
108 get_parallel(bio->bi_private); in bl_submit_bio()
110 rw == READ ? "read" : "write", bio->bi_iter.bi_size, in bl_submit_bio()
111 (unsigned long long)bio->bi_iter.bi_sector); in bl_submit_bio()
112 submit_bio(rw, bio); in bl_submit_bio()
117 static struct bio *
119 void (*end_io)(struct bio *, int err), struct parallel_io *par) in bl_alloc_init_bio() argument
121 struct bio *bio; in bl_alloc_init_bio() local
[all …]
/linux-4.1.27/drivers/target/
Dtarget_core_iblock.c309 static void iblock_bio_done(struct bio *bio, int err) in iblock_bio_done() argument
311 struct se_cmd *cmd = bio->bi_private; in iblock_bio_done()
317 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) in iblock_bio_done()
322 " err: %d\n", bio, err); in iblock_bio_done()
330 bio_put(bio); in iblock_bio_done()
335 static struct bio *
339 struct bio *bio; in iblock_get_bio() local
348 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); in iblock_get_bio()
349 if (!bio) { in iblock_get_bio()
354 bio->bi_bdev = ib_dev->ibd_bd; in iblock_get_bio()
[all …]
Dtarget_core_pscsi.c850 static void pscsi_bi_endio(struct bio *bio, int error) in pscsi_bi_endio() argument
852 bio_put(bio); in pscsi_bi_endio()
855 static inline struct bio *pscsi_get_bio(int nr_vecs) in pscsi_get_bio()
857 struct bio *bio; in pscsi_get_bio() local
862 bio = bio_kmalloc(GFP_KERNEL, nr_vecs); in pscsi_get_bio()
863 if (!bio) { in pscsi_get_bio()
867 bio->bi_end_io = pscsi_bi_endio; in pscsi_get_bio()
869 return bio; in pscsi_get_bio()
874 enum dma_data_direction data_direction, struct bio **hbio) in pscsi_map_sg()
877 struct bio *bio = NULL, *tbio = NULL; in pscsi_map_sg() local
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Dlloop.c131 struct bio *lo_bio;
132 struct bio *lo_biotail;
185 static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) in do_bio_lustrebacked()
197 struct bio *bio; in do_bio_lustrebacked() local
216 for (bio = head; bio != NULL; bio = bio->bi_next) { in do_bio_lustrebacked()
217 LASSERT(rw == bio->bi_rw); in do_bio_lustrebacked()
219 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; in do_bio_lustrebacked()
220 bio_for_each_segment(bvec, bio, iter) { in do_bio_lustrebacked()
270 static void loop_add_bio(struct lloop_device *lo, struct bio *bio) in loop_add_bio() argument
276 lo->lo_biotail->bi_next = bio; in loop_add_bio()
[all …]
/linux-4.1.27/fs/nilfs2/
Dsegbuf.c35 struct bio *bio; member
341 static void nilfs_end_bio_write(struct bio *bio, int err) in nilfs_end_bio_write() argument
343 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); in nilfs_end_bio_write()
344 struct nilfs_segment_buffer *segbuf = bio->bi_private; in nilfs_end_bio_write()
347 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); in nilfs_end_bio_write()
354 bio_put(bio); in nilfs_end_bio_write()
361 struct bio *bio = wi->bio; in nilfs_segbuf_submit_bio() local
369 bio_put(bio); in nilfs_segbuf_submit_bio()
375 bio->bi_end_io = nilfs_end_bio_write; in nilfs_segbuf_submit_bio()
376 bio->bi_private = segbuf; in nilfs_segbuf_submit_bio()
[all …]
/linux-4.1.27/drivers/block/rsxx/
Ddev.c59 struct bio *bio; member
113 static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio) in disk_stats_start() argument
115 generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio), in disk_stats_start()
120 struct bio *bio, in disk_stats_complete() argument
123 generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0, in disk_stats_complete()
138 disk_stats_complete(card, meta->bio, meta->start_time); in bio_dma_done_cb()
140 bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0); in bio_dma_done_cb()
145 static void rsxx_make_request(struct request_queue *q, struct bio *bio) in rsxx_make_request() argument
156 if (bio_end_sector(bio) > get_capacity(card->gendisk)) in rsxx_make_request()
169 if (bio->bi_iter.bi_size == 0) { in rsxx_make_request()
[all …]
Ddma.c681 struct bio *bio, in rsxx_dma_queue_bio() argument
700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ in rsxx_dma_queue_bio()
708 if (bio->bi_rw & REQ_DISCARD) { in rsxx_dma_queue_bio()
709 bv_len = bio->bi_iter.bi_size; in rsxx_dma_queue_bio()
726 bio_for_each_segment(bvec, bio, iter) { in rsxx_dma_queue_bio()
738 bio_data_dir(bio), in rsxx_dma_queue_bio()
Drsxx_priv.h395 struct bio *bio,
/linux-4.1.27/fs/btrfs/
Draid56.c185 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
856 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
857 struct bio *next; in rbio_orig_end_io()
878 static void raid_write_end_io(struct bio *bio, int err) in raid_write_end_io() argument
880 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io()
883 fail_bio_stripe(rbio, bio); in raid_write_end_io()
885 bio_put(bio); in raid_write_end_io()
1050 struct bio *last = bio_list->tail; in rbio_add_io_page()
1053 struct bio *bio; in rbio_add_io_page() local
1083 bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); in rbio_add_io_page()
[all …]
Dcompression.c75 struct bio *orig_bio;
97 static struct bio *compressed_bio_alloc(struct block_device *bdev, in compressed_bio_alloc()
155 static void end_compressed_bio_read(struct bio *bio, int err) in end_compressed_bio_read() argument
157 struct compressed_bio *cb = bio->bi_private; in end_compressed_bio_read()
174 (u64)bio->bi_iter.bi_sector << 9); in end_compressed_bio_read()
220 bio_put(bio); in end_compressed_bio_read()
269 static void end_compressed_bio_write(struct bio *bio, int err) in end_compressed_bio_write() argument
272 struct compressed_bio *cb = bio->bi_private; in end_compressed_bio_write()
317 bio_put(bio); in end_compressed_bio_write()
335 struct bio *bio = NULL; in btrfs_submit_compressed_write() local
[all …]
Draid56.h45 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
48 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
52 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
Dextent_io.c120 struct bio *bio; member
158 offsetof(struct btrfs_io_bio, bio)); in extent_io_init()
2030 struct bio *bio; in repair_io_failure() local
2045 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); in repair_io_failure()
2046 if (!bio) in repair_io_failure()
2048 bio->bi_iter.bi_size = 0; in repair_io_failure()
2054 bio_put(bio); in repair_io_failure()
2059 bio->bi_iter.bi_sector = sector; in repair_io_failure()
2063 bio_put(bio); in repair_io_failure()
2066 bio->bi_bdev = dev->bdev; in repair_io_failure()
[all …]
Dcheck-integrity.h24 void btrfsic_submit_bio(int rw, struct bio *bio);
25 int btrfsic_submit_bio_wait(int rw, struct bio *bio);
Dvolumes.h33 struct bio *head;
34 struct bio *tail;
128 struct bio *flush_bio;
269 typedef void (btrfs_io_bio_end_io_t) (struct btrfs_io_bio *bio, int err);
278 struct bio bio; member
281 static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio) in btrfs_io_bio() argument
283 return container_of(bio, struct btrfs_io_bio, bio); in btrfs_io_bio()
293 typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
303 struct bio *orig_bio;
425 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
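btrfs_io_bio() above recovers a driver-private wrapper from a plain struct bio with container_of(), which works because the bio is embedded as the final member of the wrapper; extent_io_init() (see the extent_io.c results) passes offsetof(struct btrfs_io_bio, bio) as the bioset front pad to reserve the leading fields. A hedged sketch of the same idiom with hypothetical names:

    #include <linux/bio.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Hypothetical wrapper: private state in front, bio embedded last so
     * the inline biovecs that trail the bio in memory stay valid. */
    struct my_io {
        u64 logical;
        int mirror;
        struct bio bio;
    };

    static inline struct my_io *my_io_from_bio(struct bio *bio)
    {
        return container_of(bio, struct my_io, bio);
    }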
Dscrub.c97 struct bio *bio; member
281 static void scrub_bio_end_io(struct bio *bio, int err);
298 static void scrub_wr_bio_end_io(struct bio *bio, int err);
430 bio_put(sbio->bio); in scrub_free_ctx()
1432 static void scrub_bio_wait_endio(struct bio *bio, int error) in scrub_bio_wait_endio() argument
1434 struct scrub_bio_ret *ret = bio->bi_private; in scrub_bio_wait_endio()
1447 struct bio *bio, in scrub_submit_raid56_bio_wait() argument
1455 bio->bi_iter.bi_sector = page->logical >> 9; in scrub_submit_raid56_bio_wait()
1456 bio->bi_private = &done; in scrub_submit_raid56_bio_wait()
1457 bio->bi_end_io = scrub_bio_wait_endio; in scrub_submit_raid56_bio_wait()
[all …]
Dcheck-integrity.c168 bio_end_io_t *bio; member
339 struct bio *bio, int *bio_is_patched,
346 static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status);
416 b->orig_bio_bh_end_io.bio = NULL; in btrfsic_block_init()
1674 struct bio *bio; in btrfsic_read_block() local
1677 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); in btrfsic_read_block()
1678 if (!bio) { in btrfsic_read_block()
1684 bio->bi_bdev = block_ctx->dev->bdev; in btrfsic_read_block()
1685 bio->bi_iter.bi_sector = dev_bytenr >> 9; in btrfsic_read_block()
1688 ret = bio_add_page(bio, block_ctx->pagev[j], in btrfsic_read_block()
[all …]
Dfile-item.c156 static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err) in btrfs_io_bio_endio_readpage() argument
158 kfree(bio->csum_allocated); in btrfs_io_bio_endio_readpage()
162 struct inode *inode, struct bio *bio, in __btrfs_lookup_bio_sums() argument
165 struct bio_vec *bvec = bio->bi_io_vec; in __btrfs_lookup_bio_sums()
166 struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio); in __btrfs_lookup_bio_sums()
185 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; in __btrfs_lookup_bio_sums()
204 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) in __btrfs_lookup_bio_sums()
207 WARN_ON(bio->bi_vcnt <= 0); in __btrfs_lookup_bio_sums()
220 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; in __btrfs_lookup_bio_sums()
223 while (bio_index < bio->bi_vcnt) { in __btrfs_lookup_bio_sums()
[all …]
Dextent_io.h65 struct bio *bio, int mirror_num,
75 size_t size, struct bio *bio,
328 struct bio *
331 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
332 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
367 int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
369 struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
Ddisk-io.c80 struct bio *bio; member
117 struct bio *bio; member
706 static void end_workqueue_bio(struct bio *bio, int err) in end_workqueue_bio() argument
708 struct btrfs_end_io_wq *end_io_wq = bio->bi_private; in end_workqueue_bio()
716 if (bio->bi_rw & REQ_WRITE) { in end_workqueue_bio()
751 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, in btrfs_bio_wq_end_io() argument
760 end_io_wq->private = bio->bi_private; in btrfs_bio_wq_end_io()
761 end_io_wq->end_io = bio->bi_end_io; in btrfs_bio_wq_end_io()
764 end_io_wq->bio = bio; in btrfs_bio_wq_end_io()
767 bio->bi_private = end_io_wq; in btrfs_bio_wq_end_io()
[all …]
Ddisk-io.h121 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
124 int rw, struct bio *bio, int mirror_num,
Dvolumes.c226 struct bio *head, struct bio *tail) in requeue_list()
229 struct bio *old_head; in requeue_list()
252 struct bio *pending; in run_scheduled_bios()
256 struct bio *tail; in run_scheduled_bios()
257 struct bio *cur; in run_scheduled_bios()
5588 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err) in btrfs_end_bbio() argument
5591 bio_endio_nodec(bio, err); in btrfs_end_bbio()
5593 bio_endio(bio, err); in btrfs_end_bbio()
5597 static void btrfs_end_bio(struct bio *bio, int err) in btrfs_end_bio() argument
5599 struct btrfs_bio *bbio = bio->bi_private; in btrfs_end_bio()
[all …]
Dinode.c1794 size_t size, struct bio *bio, in btrfs_merge_bio_hook() argument
1798 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_merge_bio_hook()
1806 length = bio->bi_iter.bi_size; in btrfs_merge_bio_hook()
1826 struct bio *bio, int mirror_num, in __btrfs_submit_bio_start() argument
1833 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); in __btrfs_submit_bio_start()
1846 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, in __btrfs_submit_bio_done() argument
1853 ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); in __btrfs_submit_bio_done()
1855 bio_endio(bio, ret); in __btrfs_submit_bio_done()
1863 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, in btrfs_submit_bio_hook() argument
1879 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata); in btrfs_submit_bio_hook()
[all …]
Dcompression.h46 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
Dbtrfs_inode.h300 struct bio *orig_bio;
303 struct bio *dio_bio;
/linux-4.1.27/drivers/block/
Dumem.c110 struct bio *bio, *currentbio, **biotail; member
119 struct bio *bio, **biotail; member
330 page->bio = NULL; in reset_page()
331 page->biotail = &page->bio; in reset_page()
345 struct bio *bio; in add_bio() local
349 bio = card->currentbio; in add_bio()
350 if (!bio && card->bio) { in add_bio()
351 card->currentbio = card->bio; in add_bio()
352 card->current_iter = card->bio->bi_iter; in add_bio()
353 card->bio = card->bio->bi_next; in add_bio()
[all …]
Dpktcdvd.c545 struct bio *bio = bio_kmalloc(GFP_KERNEL, 1); in pkt_alloc_packet_data() local
546 if (!bio) in pkt_alloc_packet_data()
549 pkt->r_bios[i] = bio; in pkt_alloc_packet_data()
556 struct bio *bio = pkt->r_bios[i]; in pkt_alloc_packet_data() local
557 if (bio) in pkt_alloc_packet_data()
558 bio_put(bio); in pkt_alloc_packet_data()
580 struct bio *bio = pkt->r_bios[i]; in pkt_free_packet_data() local
581 if (bio) in pkt_free_packet_data()
582 bio_put(bio); in pkt_free_packet_data()
654 if (s <= tmp->bio->bi_iter.bi_sector) in pkt_rbtree_find()
[all …]
Dosdblk.c98 struct bio *bio; /* cloned bio */ member
252 static void bio_chain_put(struct bio *chain) in bio_chain_put()
254 struct bio *tmp; in bio_chain_put()
264 static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask) in bio_chain_clone()
266 struct bio *tmp, *new_chain = NULL, *tail = NULL; in bio_chain_clone()
303 struct bio *bio; in osdblk_rq_fn() local
329 bio = bio_chain_clone(rq->bio, GFP_ATOMIC); in osdblk_rq_fn()
330 if (!bio) in osdblk_rq_fn()
333 bio = NULL; in osdblk_rq_fn()
338 bio_chain_put(bio); in osdblk_rq_fn()
[all …]
Dpmem.c60 static void pmem_make_request(struct request_queue *q, struct bio *bio) in pmem_make_request() argument
62 struct block_device *bdev = bio->bi_bdev; in pmem_make_request()
70 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) { in pmem_make_request()
75 BUG_ON(bio->bi_rw & REQ_DISCARD); in pmem_make_request()
77 rw = bio_data_dir(bio); in pmem_make_request()
78 sector = bio->bi_iter.bi_sector; in pmem_make_request()
79 bio_for_each_segment(bvec, bio, iter) { in pmem_make_request()
86 bio_endio(bio, err); in pmem_make_request()
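pmem above is nearly the smallest possible bio-based driver: a make_request_fn claims the whole bio, walks its segments, and completes it with bio_endio(). A hedged sketch of that skeleton against the 4.1 API; my_dev and my_do_page stand in for real device state and a per-segment transfer routine:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    struct my_dev { void *base; };      /* hypothetical device state */

    /* Device-specific per-segment transfer; stubbed for the sketch. */
    static int my_do_page(struct my_dev *dev, struct page *page,
                          unsigned int len, unsigned int off,
                          int rw, sector_t sector)
    {
        return 0;
    }

    static void my_make_request(struct request_queue *q, struct bio *bio)
    {
        struct my_dev *dev = bio->bi_bdev->bd_disk->private_data;
        sector_t sector = bio->bi_iter.bi_sector;
        int rw = bio_data_dir(bio);
        struct bio_vec bvec;
        struct bvec_iter iter;
        int err = 0;

        if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
            err = -EIO;
            goto out;
        }

        bio_for_each_segment(bvec, bio, iter) {
            err = my_do_page(dev, bvec.bv_page, bvec.bv_len,
                             bvec.bv_offset, rw, sector);
            if (err)
                break;
            sector += bvec.bv_len >> 9;
        }
    out:
        bio_endio(bio, err);    /* 4.1 signature: (bio, error) */
    }

The driver registers this with blk_queue_make_request(q, my_make_request) instead of attaching a request_fn, which is what makes it bio-based.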
Dbrd.c326 static void brd_make_request(struct request_queue *q, struct bio *bio) in brd_make_request() argument
328 struct block_device *bdev = bio->bi_bdev; in brd_make_request()
336 sector = bio->bi_iter.bi_sector; in brd_make_request()
337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) in brd_make_request()
340 if (unlikely(bio->bi_rw & REQ_DISCARD)) { in brd_make_request()
342 discard_from_brd(brd, sector, bio->bi_iter.bi_size); in brd_make_request()
346 rw = bio_rw(bio); in brd_make_request()
350 bio_for_each_segment(bvec, bio, iter) { in brd_make_request()
360 bio_endio(bio, err); in brd_make_request()
Dps3vram.c550 static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, in ps3vram_do_bio()
551 struct bio *bio) in ps3vram_do_bio() argument
554 int write = bio_data_dir(bio) == WRITE; in ps3vram_do_bio()
556 loff_t offset = bio->bi_iter.bi_sector << 9; in ps3vram_do_bio()
560 struct bio *next; in ps3vram_do_bio()
562 bio_for_each_segment(bvec, bio, iter) { in ps3vram_do_bio()
596 bio_endio(bio, error); in ps3vram_do_bio()
600 static void ps3vram_make_request(struct request_queue *q, struct bio *bio) in ps3vram_make_request() argument
610 bio_list_add(&priv->list, bio); in ps3vram_make_request()
617 bio = ps3vram_do_bio(dev, bio); in ps3vram_make_request()
[all …]
Dxen-blkfront.c83 struct bio *bio; member
1445 static void split_bio_end(struct bio *bio, int error) in split_bio_end() argument
1447 struct split_bio *split_bio = bio->bi_private; in split_bio_end()
1453 split_bio->bio->bi_phys_segments = 0; in split_bio_end()
1454 bio_endio(split_bio->bio, split_bio->err); in split_bio_end()
1457 bio_put(bio); in split_bio_end()
1466 struct bio *bio, *cloned_bio; in blkif_recover() local
1513 merge_bio.head = copy[i].request->bio; in blkif_recover()
1516 copy[i].request->bio = NULL; in blkif_recover()
1534 merge_bio.head = req->bio; in blkif_recover()
[all …]
Dnull_blk.c17 struct bio *bio; member
225 bio_endio(cmd->bio, 0); in end_cmd()
313 static void null_queue_bio(struct request_queue *q, struct bio *bio) in null_queue_bio() argument
320 cmd->bio = bio; in null_queue_bio()
Dfloppy.c2354 raw_cmd->kernel_data == bio_data(current_req->bio)) { in rw_interrupt()
2373 base = bio_data(current_req->bio); in buffer_chain_size()
2643 } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { in make_raw_rw_request()
2657 ((unsigned long)bio_data(current_req->bio))) >> 9; in make_raw_rw_request()
2661 if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) in make_raw_rw_request()
2663 ((unsigned long)bio_data(current_req->bio)) % in make_raw_rw_request()
2680 raw_cmd->kernel_data = bio_data(current_req->bio); in make_raw_rw_request()
2734 (raw_cmd->kernel_data != bio_data(current_req->bio) && in make_raw_rw_request()
2742 if (raw_cmd->kernel_data != bio_data(current_req->bio)) in make_raw_rw_request()
2759 if (raw_cmd->kernel_data != bio_data(current_req->bio)) { in make_raw_rw_request()
[all …]
Dhd.c467 insw(HD_DATA, bio_data(req->bio), 256); in read_intr()
471 blk_rq_sectors(req) - 1, bio_data(req->bio)+512); in read_intr()
508 outsw(HD_DATA, bio_data(req->bio), 256); in write_intr()
627 cyl, head, sec, nsect, bio_data(req->bio)); in hd_request()
646 outsw(HD_DATA, bio_data(req->bio), 256); in hd_request()
Dvirtio_blk.c240 struct bio *bio; in virtblk_get_id() local
243 bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, in virtblk_get_id()
245 if (IS_ERR(bio)) in virtblk_get_id()
246 return PTR_ERR(bio); in virtblk_get_id()
248 req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); in virtblk_get_id()
250 bio_put(bio); in virtblk_get_id()
Drbd.c264 struct bio *bio_list;
1234 static void bio_chain_put(struct bio *chain) in bio_chain_put()
1236 struct bio *tmp; in bio_chain_put()
1248 static void zero_bio_chain(struct bio *chain, int start_ofs) in zero_bio_chain()
1309 static struct bio *bio_clone_range(struct bio *bio_src, in bio_clone_range()
1314 struct bio *bio; in bio_clone_range() local
1316 bio = bio_clone(bio_src, gfpmask); in bio_clone_range()
1317 if (!bio) in bio_clone_range()
1320 bio_advance(bio, offset); in bio_clone_range()
1321 bio->bi_iter.bi_size = len; in bio_clone_range()
[all …]
Dmg_disk.c482 u16 *buff = (u16 *)bio_data(req->bio); in mg_read_one()
499 blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio)); in mg_read()
517 u16 *buff = (u16 *)bio_data(req->bio); in mg_write_one()
537 rem, blk_rq_pos(req), bio_data(req->bio)); in mg_write()
588 blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio)); in mg_read_intr()
627 blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio)); in mg_write_intr()
Dloop.c296 struct bio *bio; in lo_read_simple() local
298 __rq_for_each_bio(bio, rq) in lo_read_simple()
299 zero_fill_bio(bio); in lo_read_simple()
344 struct bio *bio; in lo_read_transfer() local
346 __rq_for_each_bio(bio, rq) in lo_read_transfer()
347 zero_fill_bio(bio); in lo_read_transfer()
/linux-4.1.27/fs/jfs/
Djfs_metapage.c279 static void metapage_read_end_io(struct bio *bio, int err) in metapage_read_end_io() argument
281 struct page *page = bio->bi_private; in metapage_read_end_io()
283 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { in metapage_read_end_io()
289 bio_put(bio); in metapage_read_end_io()
334 static void metapage_write_end_io(struct bio *bio, int err) in metapage_write_end_io() argument
336 struct page *page = bio->bi_private; in metapage_write_end_io()
340 if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) { in metapage_write_end_io()
345 bio_put(bio); in metapage_write_end_io()
350 struct bio *bio = NULL; in metapage_writepage() local
395 if (bio) { in metapage_writepage()
[all …]
Djfs_logmgr.c1987 struct bio *bio; in lbmRead() local
1998 bio = bio_alloc(GFP_NOFS, 1); in lbmRead()
2000 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmRead()
2001 bio->bi_bdev = log->bdev; in lbmRead()
2002 bio->bi_io_vec[0].bv_page = bp->l_page; in lbmRead()
2003 bio->bi_io_vec[0].bv_len = LOGPSIZE; in lbmRead()
2004 bio->bi_io_vec[0].bv_offset = bp->l_offset; in lbmRead()
2006 bio->bi_vcnt = 1; in lbmRead()
2007 bio->bi_iter.bi_size = LOGPSIZE; in lbmRead()
2009 bio->bi_end_io = lbmIODone; in lbmRead()
[all …]
/linux-4.1.27/drivers/block/drbd/
Ddrbd_req.h260 static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src) in drbd_req_make_private_bio()
262 struct bio *bio; in drbd_req_make_private_bio() local
263 bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */ in drbd_req_make_private_bio()
265 req->private_bio = bio; in drbd_req_make_private_bio()
267 bio->bi_private = req; in drbd_req_make_private_bio()
268 bio->bi_end_io = drbd_request_endio; in drbd_req_make_private_bio()
269 bio->bi_next = NULL; in drbd_req_make_private_bio()
276 struct bio *bio; member
306 if (m.bio) in _req_mod()
328 if (m.bio) in req_mod()
Ddrbd_req.c51 struct bio *bio_src) in drbd_req_new()
204 bio_endio(m->bio, m->error); in complete_master_bio()
295 m->bio = req->master_bio; in drbd_req_complete()
587 m->bio = NULL; in __req_mod()
1140 struct bio *bio = req->private_bio; in drbd_submit_req_private_bio() local
1141 const int rw = bio_rw(bio); in drbd_submit_req_private_bio()
1143 bio->bi_bdev = device->ldev->backing_bdev; in drbd_submit_req_private_bio()
1156 bio_endio(bio, -EIO); in drbd_submit_req_private_bio()
1158 generic_make_request(bio); in drbd_submit_req_private_bio()
1161 bio_endio(bio, -EIO); in drbd_submit_req_private_bio()
[all …]
Ddrbd_bitmap.c944 static void drbd_bm_endio(struct bio *bio, int error) in drbd_bm_endio() argument
946 struct drbd_bm_aio_ctx *ctx = bio->bi_private; in drbd_bm_endio()
949 unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page); in drbd_bm_endio()
950 int uptodate = bio_flagged(bio, BIO_UPTODATE); in drbd_bm_endio()
982 mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool); in drbd_bm_endio()
984 bio_put(bio); in drbd_bm_endio()
995 struct bio *bio = bio_alloc_drbd(GFP_NOIO); in bm_page_io_async() local
1024 bio->bi_bdev = device->ldev->md_bdev; in bm_page_io_async()
1025 bio->bi_iter.bi_sector = on_disk_sector; in bm_page_io_async()
1028 bio_add_page(bio, page, len, 0); in bm_page_io_async()
[all …]
Ddrbd_actlog.c142 struct bio *bio; in _drbd_md_sync_page_io() local
154 bio = bio_alloc_drbd(GFP_NOIO); in _drbd_md_sync_page_io()
155 bio->bi_bdev = bdev->md_bdev; in _drbd_md_sync_page_io()
156 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io()
158 if (bio_add_page(bio, device->md_io.page, size, 0) != size) in _drbd_md_sync_page_io()
160 bio->bi_private = device; in _drbd_md_sync_page_io()
161 bio->bi_end_io = drbd_md_endio; in _drbd_md_sync_page_io()
162 bio->bi_rw = rw; in _drbd_md_sync_page_io()
174 bio_get(bio); /* one bio_put() is in the completion handler */ in _drbd_md_sync_page_io()
178 bio_endio(bio, -EIO); in _drbd_md_sync_page_io()
[all …]
Ddrbd_worker.c68 void drbd_md_endio(struct bio *bio, int error) in drbd_md_endio() argument
72 device = bio->bi_private; in drbd_md_endio()
89 bio_put(bio); in drbd_md_endio()
173 void drbd_peer_request_endio(struct bio *bio, int error) in drbd_peer_request_endio() argument
175 struct drbd_peer_request *peer_req = bio->bi_private; in drbd_peer_request_endio()
177 int uptodate = bio_flagged(bio, BIO_UPTODATE); in drbd_peer_request_endio()
178 int is_write = bio_data_dir(bio) == WRITE; in drbd_peer_request_endio()
179 int is_discard = !!(bio->bi_rw & REQ_DISCARD); in drbd_peer_request_endio()
200 bio_put(bio); /* no need for the bio anymore */ in drbd_peer_request_endio()
211 void drbd_request_endio(struct bio *bio, int error) in drbd_request_endio() argument
[all …]
Ddrbd_int.h302 struct bio *private_bio;
317 struct bio *master_bio; /* master bio pointer */
1424 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1449 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1450 extern void drbd_make_request(struct request_queue *q, struct bio *bio);
1483 extern void drbd_md_endio(struct bio *bio, int error);
1484 extern void drbd_peer_request_endio(struct bio *bio, int error);
1485 extern void drbd_request_endio(struct bio *bio, int error);
1514 extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
1601 int fault_type, struct bio *bio) in drbd_generic_make_request() argument
[all …]
Ddrbd_receiver.c1370 struct bio *bios = NULL; in drbd_submit_peer_request()
1371 struct bio *bio; in drbd_submit_peer_request() local
1412 bio = bio_alloc(GFP_NOIO, nr_pages); in drbd_submit_peer_request()
1413 if (!bio) { in drbd_submit_peer_request()
1418 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request()
1419 bio->bi_bdev = device->ldev->backing_bdev; in drbd_submit_peer_request()
1420 bio->bi_rw = rw; in drbd_submit_peer_request()
1421 bio->bi_private = peer_req; in drbd_submit_peer_request()
1422 bio->bi_end_io = drbd_peer_request_endio; in drbd_submit_peer_request()
1424 bio->bi_next = bios; in drbd_submit_peer_request()
[all …]
Ddrbd_main.c149 struct bio *bio_alloc_drbd(gfp_t gfp_mask) in bio_alloc_drbd()
151 struct bio *bio; in bio_alloc_drbd() local
156 bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set); in bio_alloc_drbd()
157 if (!bio) in bio_alloc_drbd()
159 return bio; in bio_alloc_drbd()
1548 static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio) in _drbd_send_bio() argument
1554 bio_for_each_segment(bvec, bio, iter) { in _drbd_send_bio()
1567 static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio) in _drbd_send_zc_bio() argument
1573 bio_for_each_segment(bvec, bio, iter) { in _drbd_send_zc_bio()
2235 struct bio *bio = req->master_bio; in do_retry() local
[all …]
/linux-4.1.27/Documentation/block/
Dbiovecs.txt7 As of 3.13, biovecs should never be modified after a bio has been submitted.
9 the iterator will be modified as the bio is completed, not the biovec.
11 More specifically, old code that needed to partially complete a bio would
17 partially complete a bio is segregated into struct bvec_iter: bi_sector,
37 wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
38 advances the bio integrity's iter if present.
41 a pointer to a biovec, not a bio; this is used by the bio integrity code.
50 exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
51 which copies the contents of one bio into another. Because the biovecs
61 * Before, any code that might need to use the biovec after the bio had been
[all …]
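Since the excerpt stresses that all per-bio progress lives in struct bvec_iter, iterating a bio never modifies its biovec array. A minimal sketch of the 4.1 iteration idiom the text describes:

    #include <linux/bio.h>

    /* bio_for_each_segment() advances a private copy of bi_iter, so the
     * bio's own state is untouched and it can still be split/completed. */
    static unsigned int my_count_bytes(struct bio *bio)
    {
        struct bio_vec bvec;
        struct bvec_iter iter;
        unsigned int bytes = 0;

        bio_for_each_segment(bvec, bio, iter)
            bytes += bvec.bv_len;

        return bytes;   /* matches bio->bi_iter.bi_size for a fresh bio */
    }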
Drequest.txt82 struct bio *bio DBI First bio in request
84 struct bio *biotail DBI Last bio in request
Ddata-integrity.txt131 The data integrity patches add a new field to struct bio when
132 CONFIG_BLK_DEV_INTEGRITY is enabled. bio_integrity(bio) returns a
133 pointer to a struct bip which contains the bio integrity payload.
134 Essentially a bip is a trimmed down struct bio which holds a bio_vec
138 A kernel subsystem can enable data integrity protection on a bio by
139 calling bio_integrity_alloc(bio). This will allocate and attach the
140 bip to the bio.
195 int bio_integrity_prep(bio);
198 filesystem must call bio_integrity_prep(bio).
200 Prior to calling this function, the bio data direction and start
[all …]
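A hedged sketch of the submission-side sequence the excerpt describes; bio_integrity_enabled() and bio_integrity_prep() are the real 4.1 helpers, while the surrounding setup is illustrative:

    #include <linux/bio.h>
    #include <linux/fs.h>

    static int my_submit_with_integrity(struct bio *bio, int rw)
    {
        /* Direction and start sector must already be set, since
         * bio_integrity_prep() generates tags for those sectors. */
        bio->bi_rw |= rw;

        if (bio_integrity_enabled(bio)) {
            int ret = bio_integrity_prep(bio);

            if (ret)
                return ret;     /* bip allocation/generation failed */
        }

        submit_bio(rw, bio);
        return 0;
    }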
Dbiodoc.txt15 context of the bio rewrite. The idea is to bring out some of the key
23 2.5 bio rewrite:
37 The following people helped with fixes/contributions to the bio patches
58 2.2 The bio struct in detail (multi-page io unit)
62 3.2 Generic bio helper routines
194 cases, a bounce bio representing a buffer from the supported memory range
256 The flags and rw fields in the bio structure can be used for some tuning
272 requests. Some bits in the bi_rw flags field in the bio structure are
303 For passing request data, the caller must build up a bio descriptor
305 bio segments or uses the block layer end*request* functions for i/o
[all …]
Dwriteback_cache_control.txt23 The REQ_FLUSH flag can be ORed into the r/w flags of a bio submitted from
27 storage before the flagged bio starts. In addition the REQ_FLUSH flag can be
28 set on an otherwise empty bio structure, which causes only an explicit cache
36 The REQ_FUA flag can be ORed into the r/w flags of a bio submitted from the
47 may both be set on a single bio.
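A hedged sketch of the empty-flush case described above, using the 4.1 WRITE_FLUSH flag combination; everything except the block-layer calls is illustrative:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/fs.h>

    static void my_flush_cache(struct block_device *bdev,
                               bio_end_io_t *end_io, void *private)
    {
        /* An otherwise empty bio with REQ_FLUSH set requests an
         * explicit flush of the device's volatile write cache. */
        struct bio *bio = bio_alloc(GFP_NOIO, 0);

        bio->bi_bdev = bdev;
        bio->bi_end_io = end_io;
        bio->bi_private = private;
        submit_bio(WRITE_FLUSH, bio);   /* WRITE | REQ_FLUSH (+sync bits) */
    }

Synchronous callers can use the stock wrapper blkdev_issue_flush(bdev, GFP_KERNEL, NULL) instead of open-coding the bio.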
Dnull_blk.txt17 No block-layer (Known as bio-based)
19 - Directly accepts bio data structure and returns them.
60 defaults to 1 on single-queue and bio-based instances. For multi-queue,
/linux-4.1.27/drivers/scsi/osd/
Dosd_initiator.c450 if (unlikely(rq->bio)) in _put_request()
718 struct bio *bio; in _osd_req_list_objects() local
725 WARN_ON(or->in.bio); in _osd_req_list_objects()
726 bio = bio_map_kern(q, list, len, or->alloc_flags); in _osd_req_list_objects()
727 if (IS_ERR(bio)) { in _osd_req_list_objects()
729 return PTR_ERR(bio); in _osd_req_list_objects()
732 bio->bi_rw &= ~REQ_WRITE; in _osd_req_list_objects()
733 or->in.bio = bio; in _osd_req_list_objects()
734 or->in.total_bytes = bio->bi_iter.bi_size; in _osd_req_list_objects()
826 struct bio *bio, u64 len) in osd_req_write() argument
[all …]
/linux-4.1.27/drivers/s390/block/
Dxpram.c184 static void xpram_make_request(struct request_queue *q, struct bio *bio) in xpram_make_request() argument
186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; in xpram_make_request()
193 if ((bio->bi_iter.bi_sector & 7) != 0 || in xpram_make_request()
194 (bio->bi_iter.bi_size & 4095) != 0) in xpram_make_request()
197 if ((bio->bi_iter.bi_size >> 12) > xdev->size) in xpram_make_request()
200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) in xpram_make_request()
202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; in xpram_make_request()
203 bio_for_each_segment(bvec, bio, iter) { in xpram_make_request()
211 if (bio_data_dir(bio) == READ) { in xpram_make_request()
223 set_bit(BIO_UPTODATE, &bio->bi_flags); in xpram_make_request()
[all …]
Ddcssblk.c30 static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
819 dcssblk_make_request(struct request_queue *q, struct bio *bio) in dcssblk_make_request() argument
830 dev_info = bio->bi_bdev->bd_disk->private_data; in dcssblk_make_request()
833 if ((bio->bi_iter.bi_sector & 7) != 0 || in dcssblk_make_request()
834 (bio->bi_iter.bi_size & 4095) != 0) in dcssblk_make_request()
837 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { in dcssblk_make_request()
848 if (bio_data_dir(bio) == WRITE) { in dcssblk_make_request()
857 index = (bio->bi_iter.bi_sector >> 3); in dcssblk_make_request()
858 bio_for_each_segment(bvec, bio, iter) { in dcssblk_make_request()
865 if (bio_data_dir(bio) == READ) { in dcssblk_make_request()
[all …]
Ddasd_diag.c60 struct dasd_diag_bio bio[0]; member
184 private->iob.bio_list = dreq->bio; in dasd_start_diag()
319 struct dasd_diag_bio bio; in dasd_diag_check_device() local
402 memset(&bio, 0, sizeof (struct dasd_diag_bio)); in dasd_diag_check_device()
403 bio.type = MDSK_READ_REQ; in dasd_diag_check_device()
404 bio.block_number = private->pt_block + 1; in dasd_diag_check_device()
405 bio.buffer = label; in dasd_diag_check_device()
412 private->iob.bio_list = &bio; in dasd_diag_check_device()
545 dbio = dreq->bio; in dasd_diag_build_cp()
/linux-4.1.27/fs/gfs2/
Dlops.c205 static void gfs2_end_log_write(struct bio *bio, int error) in gfs2_end_log_write() argument
207 struct gfs2_sbd *sdp = bio->bi_private; in gfs2_end_log_write()
217 bio_for_each_segment_all(bvec, bio, i) { in gfs2_end_log_write()
225 bio_put(bio); in gfs2_end_log_write()
261 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno) in gfs2_log_alloc_bio()
265 struct bio *bio; in gfs2_log_alloc_bio() local
270 bio = bio_alloc(GFP_NOIO, nrvecs); in gfs2_log_alloc_bio()
271 if (likely(bio)) in gfs2_log_alloc_bio()
276 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); in gfs2_log_alloc_bio()
277 bio->bi_bdev = sb->s_bdev; in gfs2_log_alloc_bio()
[all …]
Dops_fstype.c174 static void end_bio_io_page(struct bio *bio, int error) in end_bio_io_page() argument
176 struct page *page = bio->bi_private; in end_bio_io_page()
232 struct bio *bio; in gfs2_read_super() local
242 bio = bio_alloc(GFP_NOFS, 1); in gfs2_read_super()
243 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); in gfs2_read_super()
244 bio->bi_bdev = sb->s_bdev; in gfs2_read_super()
245 bio_add_page(bio, page, PAGE_SIZE, 0); in gfs2_read_super()
247 bio->bi_end_io = end_bio_io_page; in gfs2_read_super()
248 bio->bi_private = page; in gfs2_read_super()
249 submit_bio(READ_SYNC | REQ_META, bio); in gfs2_read_super()
[all …]
/linux-4.1.27/drivers/scsi/
Dsd_dif.c112 struct bio *bio; in sd_dif_prepare() local
124 __rq_for_each_bio(bio, scmd->request) { in sd_dif_prepare()
125 struct bio_integrity_payload *bip = bio_integrity(bio); in sd_dif_prepare()
163 struct bio *bio; in sd_dif_complete() local
176 __rq_for_each_bio(bio, scmd->request) { in sd_dif_complete()
177 struct bio_integrity_payload *bip = bio_integrity(bio); in sd_dif_complete()
Dst.h32 struct bio *bio; member
Dsd.c597 struct bio *bio = scmd->request->bio; in sd_setup_protect_cmnd() local
602 if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM)) in sd_setup_protect_cmnd()
605 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) in sd_setup_protect_cmnd()
612 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) in sd_setup_protect_cmnd()
619 if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK)) in sd_setup_protect_cmnd()
820 struct bio *bio = rq->bio; in sd_setup_write_same_cmnd() local
829 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); in sd_setup_write_same_cmnd()
Dosst.h638 struct bio *bio; member
/linux-4.1.27/include/scsi/
Dosd_initiator.h143 struct bio *bio; member
430 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
434 const struct osd_obj_id *, struct bio *data_out);/* NI */
436 const struct osd_obj_id *, struct bio *data_out, u64 offset);/* NI */
447 const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
453 const struct osd_obj_id *obj, struct bio *bio,
456 const struct osd_obj_id *obj, struct bio *bio,
Dosd_sec.h40 void osd_sec_sign_data(void *data_integ, struct bio *bio, const u8 *cap_key);
Dosd_ore.h158 struct bio *bio; member
/linux-4.1.27/kernel/trace/
Dblktrace.c775 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, in blk_add_trace_bio() argument
783 if (!error && !bio_flagged(bio, BIO_UPTODATE)) in blk_add_trace_bio()
786 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, in blk_add_trace_bio()
787 bio->bi_rw, what, error, 0, NULL); in blk_add_trace_bio()
791 struct request_queue *q, struct bio *bio) in blk_add_trace_bio_bounce() argument
793 blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); in blk_add_trace_bio_bounce()
797 struct request_queue *q, struct bio *bio, in blk_add_trace_bio_complete() argument
800 blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); in blk_add_trace_bio_complete()
806 struct bio *bio) in blk_add_trace_bio_backmerge() argument
808 blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); in blk_add_trace_bio_backmerge()
[all …]
/linux-4.1.27/Documentation/ABI/testing/
Dsysfs-class-pktcdvd42 size (0444) Contains the size of the bio write
45 congestion_off (0644) If bio write queue size is below
46 this mark, accept new bio requests
49 congestion_on (0644) If bio write queue size is higher
51 bio write requests from the block
53 device has processed enough bio's
54 so that bio write queue size is
Dsysfs-block-dm38 bio-based DM devices so it will only ever report 0 for
/linux-4.1.27/fs/exofs/
Dore.c47 (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),};
328 if (per_dev->bio) in ore_put_io_state()
329 bio_put(per_dev->bio); in ore_put_io_state()
405 static void _clear_bio(struct bio *bio) in _clear_bio() argument
410 bio_for_each_segment_all(bv, bio, i) { in _clear_bio()
440 per_dev->bio) { in ore_check_io()
445 _clear_bio(per_dev->bio); in ore_check_io()
603 if (per_dev->bio == NULL) { in _ore_add_stripe_unit()
615 per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size); in _ore_add_stripe_unit()
616 if (unlikely(!per_dev->bio)) { in _ore_add_stripe_unit()
[all …]
Dore_raid.c360 per_dev->bio = bio_kmalloc(GFP_KERNEL, in _add_to_r4w()
362 if (unlikely(!per_dev->bio)) { in _add_to_r4w()
375 added_len = bio_add_pc_page(q, per_dev->bio, page, pg_len, in _add_to_r4w()
379 per_dev->bio->bi_vcnt); in _add_to_r4w()
435 struct bio *bio = ios->per_dev[d].bio; in _mark_read4write_pages_uptodate() local
437 if (!bio) in _mark_read4write_pages_uptodate()
440 bio_for_each_segment_all(bv, bio, i) { in _mark_read4write_pages_uptodate()
/linux-4.1.27/fs/hfsplus/
Dwrapper.c49 struct bio *bio; in hfsplus_submit_bio() local
65 bio = bio_alloc(GFP_NOIO, 1); in hfsplus_submit_bio()
66 bio->bi_iter.bi_sector = sector; in hfsplus_submit_bio()
67 bio->bi_bdev = sb->s_bdev; in hfsplus_submit_bio()
77 ret = bio_add_page(bio, virt_to_page(buf), len, page_offset); in hfsplus_submit_bio()
86 ret = submit_bio_wait(rw, bio); in hfsplus_submit_bio()
88 bio_put(bio); in hfsplus_submit_bio()
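hfsplus_submit_bio() above builds on submit_bio_wait(), which makes a one-off bio synchronous by blocking until completion. A hedged sketch of the same single-page synchronous read; only the names outside the block layer are invented:

    #include <linux/bio.h>

    static int my_sync_read_page(struct block_device *bdev,
                                 sector_t sector, struct page *page)
    {
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int ret;

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        /* 4.1 signature: int submit_bio_wait(int rw, struct bio *). */
        ret = submit_bio_wait(READ, bio);
        bio_put(bio);
        return ret;
    }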
/linux-4.1.27/drivers/block/aoe/
Daoecmd.c297 skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter) in skb_fillup() argument
302 __bio_for_each_segment(bv, bio, iter, iter) in skb_fillup()
352 if (f->buf && bio_data_dir(f->buf->bio) == WRITE) { in ata_rw_frameinit()
353 skb_fillup(skb, f->buf->bio, f->iter); in ata_rw_frameinit()
389 bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size); in aoecmd_ata_rw()
848 struct bio *bio; in rqbiocnt() local
851 __rq_for_each_bio(bio, r) in rqbiocnt()
867 bio_pageinc(struct bio *bio) in bio_pageinc() argument
873 bio_for_each_segment(bv, bio, iter) { in bio_pageinc()
883 bio_pagedec(struct bio *bio) in bio_pagedec() argument
[all …]
Daoe.h103 struct bio *bio; member
176 struct bio *nxbio;
Daoedev.c164 struct bio *bio; in aoe_failip() local
172 while ((bio = d->ip.nxbio)) { in aoe_failip()
173 clear_bit(BIO_UPTODATE, &bio->bi_flags); in aoe_failip()
174 d->ip.nxbio = bio->bi_next; in aoe_failip()
/linux-4.1.27/fs/xfs/
Dxfs_aops.c354 struct bio *bio, in xfs_end_bio() argument
357 xfs_ioend_t *ioend = bio->bi_private; in xfs_end_bio()
359 ASSERT(atomic_read(&bio->bi_cnt) >= 1); in xfs_end_bio()
360 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; in xfs_end_bio()
363 bio->bi_private = NULL; in xfs_end_bio()
364 bio->bi_end_io = NULL; in xfs_end_bio()
365 bio_put(bio); in xfs_end_bio()
374 struct bio *bio) in xfs_submit_ioend_bio() argument
377 bio->bi_private = ioend; in xfs_submit_ioend_bio()
378 bio->bi_end_io = xfs_end_bio; in xfs_submit_ioend_bio()
[all …]
Dxfs_buf.c1106 struct bio *bio, in xfs_buf_bio_end_io() argument
1109 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; in xfs_buf_bio_end_io()
1127 bio_put(bio); in xfs_buf_bio_end_io()
1141 struct bio *bio; in xfs_buf_ioapply_map() local
1170 bio = bio_alloc(GFP_NOIO, nr_pages); in xfs_buf_ioapply_map()
1171 bio->bi_bdev = bp->b_target->bt_bdev; in xfs_buf_ioapply_map()
1172 bio->bi_iter.bi_sector = sector; in xfs_buf_ioapply_map()
1173 bio->bi_end_io = xfs_buf_bio_end_io; in xfs_buf_ioapply_map()
1174 bio->bi_private = bp; in xfs_buf_ioapply_map()
1183 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, in xfs_buf_ioapply_map()
[all …]
/linux-4.1.27/arch/xtensa/platforms/iss/
Dsimdisk.c104 static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio) in simdisk_xfer_bio() argument
108 sector_t sector = bio->bi_iter.bi_sector; in simdisk_xfer_bio()
110 bio_for_each_segment(bvec, bio, iter) { in simdisk_xfer_bio()
111 char *buffer = __bio_kmap_atomic(bio, iter); in simdisk_xfer_bio()
115 bio_data_dir(bio) == WRITE); in simdisk_xfer_bio()
122 static void simdisk_make_request(struct request_queue *q, struct bio *bio) in simdisk_make_request() argument
125 int status = simdisk_xfer_bio(dev, bio); in simdisk_make_request()
126 bio_endio(bio, status); in simdisk_make_request()
/linux-4.1.27/arch/powerpc/sysdev/
Daxonram.c107 axon_ram_make_request(struct request_queue *queue, struct bio *bio) in axon_ram_make_request() argument
109 struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; in axon_ram_make_request()
116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << in axon_ram_make_request()
120 bio_for_each_segment(vec, bio, iter) { in axon_ram_make_request()
122 bio_io_error(bio); in axon_ram_make_request()
127 if (bio_data_dir(bio) == READ) in axon_ram_make_request()
135 bio_endio(bio, 0); in axon_ram_make_request()
/linux-4.1.27/include/linux/ceph/
Dmessenger.h103 struct bio *bio; member
127 struct bio *bio; /* bio from list */ member
291 extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
Dosd_client.h70 struct bio *bio; /* list of bios */ member
285 struct bio *bio, size_t bio_length);
/linux-4.1.27/arch/m68k/emu/
Dnfblock.c62 static void nfhd_make_request(struct request_queue *queue, struct bio *bio) in nfhd_make_request() argument
68 sector_t sec = bio->bi_iter.bi_sector; in nfhd_make_request()
70 dir = bio_data_dir(bio); in nfhd_make_request()
72 bio_for_each_segment(bvec, bio, iter) { in nfhd_make_request()
79 bio_endio(bio, 0); in nfhd_make_request()
/linux-4.1.27/drivers/block/xen-blkback/
Dblkback.c1074 static void end_block_io_op(struct bio *bio, int error) in end_block_io_op() argument
1076 __end_block_io_op(bio->bi_private, error); in end_block_io_op()
1077 bio_put(bio); in end_block_io_op()
1195 struct bio *bio = NULL; in dispatch_rw_block_io() local
1196 struct bio **biolist = pending_req->biolist; in dispatch_rw_block_io()
1322 while ((bio == NULL) || in dispatch_rw_block_io()
1323 (bio_add_page(bio, in dispatch_rw_block_io()
1329 bio = bio_alloc(GFP_KERNEL, nr_iovecs); in dispatch_rw_block_io()
1330 if (unlikely(bio == NULL)) in dispatch_rw_block_io()
1333 biolist[nbio++] = bio; in dispatch_rw_block_io()
[all …]
/linux-4.1.27/drivers/scsi/libsas/
Dsas_host_smp.c239 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE || in sas_smp_host_handler()
240 bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) { in sas_smp_host_handler()
258 buf = kmap_atomic(bio_page(req->bio)); in sas_smp_host_handler()
260 kunmap_atomic(buf - bio_offset(req->bio)); in sas_smp_host_handler()
373 buf = kmap_atomic(bio_page(rsp->bio)); in sas_smp_host_handler()
375 flush_kernel_dcache_page(bio_page(rsp->bio)); in sas_smp_host_handler()
376 kunmap_atomic(buf - bio_offset(rsp->bio)); in sas_smp_host_handler()
/linux-4.1.27/fs/f2fs/
Ddata.c31 static void f2fs_read_end_io(struct bio *bio, int err) in f2fs_read_end_io() argument
36 bio_for_each_segment_all(bvec, bio, i) { in f2fs_read_end_io()
47 bio_put(bio); in f2fs_read_end_io()
50 static void f2fs_write_end_io(struct bio *bio, int err) in f2fs_write_end_io() argument
52 struct f2fs_sb_info *sbi = bio->bi_private; in f2fs_write_end_io()
56 bio_for_each_segment_all(bvec, bio, i) { in f2fs_write_end_io()
72 bio_put(bio); in f2fs_write_end_io()
78 static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, in __bio_alloc()
81 struct bio *bio; in __bio_alloc() local
84 bio = bio_alloc(GFP_NOIO, npages); in __bio_alloc()
[all …]
/linux-4.1.27/drivers/block/zram/
Dzram_drv.c741 int offset, struct bio *bio) in zram_bio_discard() argument
743 size_t n = bio->bi_iter.bi_size; in zram_bio_discard()
922 static void __zram_make_request(struct zram *zram, struct bio *bio) in __zram_make_request() argument
929 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; in __zram_make_request()
930 offset = (bio->bi_iter.bi_sector & in __zram_make_request()
933 if (unlikely(bio->bi_rw & REQ_DISCARD)) { in __zram_make_request()
934 zram_bio_discard(zram, index, offset, bio); in __zram_make_request()
935 bio_endio(bio, 0); in __zram_make_request()
939 rw = bio_data_dir(bio); in __zram_make_request()
940 bio_for_each_segment(bvec, bio, iter) { in __zram_make_request()
[all …]
/linux-4.1.27/fs/ocfs2/cluster/
Dheartbeat.c375 static void o2hb_bio_end_io(struct bio *bio, in o2hb_bio_end_io() argument
378 struct o2hb_bio_wait_ctxt *wc = bio->bi_private; in o2hb_bio_end_io()
386 bio_put(bio); in o2hb_bio_end_io()
391 static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, in o2hb_setup_one_bio()
401 struct bio *bio; in o2hb_setup_one_bio() local
408 bio = bio_alloc(GFP_ATOMIC, 16); in o2hb_setup_one_bio()
409 if (!bio) { in o2hb_setup_one_bio()
411 bio = ERR_PTR(-ENOMEM); in o2hb_setup_one_bio()
416 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); in o2hb_setup_one_bio()
417 bio->bi_bdev = reg->hr_bdev; in o2hb_setup_one_bio()
[all …]
/linux-4.1.27/net/ceph/
Dmessenger.c832 struct bio *bio; in ceph_msg_data_bio_cursor_init() local
836 bio = data->bio; in ceph_msg_data_bio_cursor_init()
837 BUG_ON(!bio); in ceph_msg_data_bio_cursor_init()
840 cursor->bio = bio; in ceph_msg_data_bio_cursor_init()
841 cursor->bvec_iter = bio->bi_iter; in ceph_msg_data_bio_cursor_init()
843 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter); in ceph_msg_data_bio_cursor_init()
851 struct bio *bio; in ceph_msg_data_bio_next() local
856 bio = cursor->bio; in ceph_msg_data_bio_next()
857 BUG_ON(!bio); in ceph_msg_data_bio_next()
859 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter); in ceph_msg_data_bio_next()
[all …]
/linux-4.1.27/Documentation/device-mapper/
Ddm-io.txt36 The second I/O service type takes an array of bio vectors as the data buffer
37 for the I/O. This service can be handy if the caller has a pre-assembled bio,
38 but wants to direct different portions of the bio to different devices.
Ddm-flakey.txt38 each matching bio with <value>.
45 <flags>: Perform the replacement only if bio->bi_rw has all the
Dcache.txt127 On-disk metadata is committed every time a FLUSH or FUA bio is written.
233 #read hits : Number of times a READ bio has been mapped
235 #read misses : Number of times a READ bio has been mapped
237 #write hits : Number of times a WRITE bio has been mapped
239 #write misses : Number of times a WRITE bio has been
/linux-4.1.27/Documentation/DocBook/
D.filesystems.xml.cmd2 …c fs/super.c fs/locks.c fs/locks.c fs/mpage.c fs/namei.c fs/buffer.c block/bio.c fs/seq_file.c fs/…
/linux-4.1.27/drivers/scsi/mpt2sas/
Dmpt2sas_transport.c1944 if (bio_multiple_segments(req->bio)) { in _transport_smp_handler()
1957 bio_for_each_segment(bvec, req->bio, iter) { in _transport_smp_handler()
1964 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), in _transport_smp_handler()
1976 if (bio_multiple_segments(rsp->bio)) { in _transport_smp_handler()
1986 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), in _transport_smp_handler()
2043 if (bio_multiple_segments(req->bio)) { in _transport_smp_handler()
2059 if (bio_multiple_segments(rsp->bio)) { in _transport_smp_handler()
2104 if (bio_multiple_segments(rsp->bio)) { in _transport_smp_handler()
2108 bio_for_each_segment(bvec, rsp->bio, iter) { in _transport_smp_handler()
/linux-4.1.27/drivers/scsi/mpt3sas/
Dmpt3sas_transport.c1927 if (bio_multiple_segments(req->bio)) { in _transport_smp_handler()
1940 bio_for_each_segment(bvec, req->bio, iter) { in _transport_smp_handler()
1947 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), in _transport_smp_handler()
1959 if (bio_multiple_segments(rsp->bio)) { in _transport_smp_handler()
1969 dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), in _transport_smp_handler()
2020 if (bio_multiple_segments(req->bio)) in _transport_smp_handler()
2065 if (bio_multiple_segments(rsp->bio)) { in _transport_smp_handler()
2069 bio_for_each_segment(bvec, rsp->bio, iter) { in _transport_smp_handler()
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
Dlustre_compat25.h151 #define bio_hw_segments(q, bio) 0 argument
/linux-4.1.27/Documentation/devicetree/bindings/mfd/
Dda9063.txt32 bio : BUCK IO
