/linux-4.1.27/drivers/md/
D | dm-bio-record.h | 23 struct bvec_iter bi_iter; member 30 bd->bi_iter = bio->bi_iter; in dm_bio_record() 37 bio->bi_iter = bd->bi_iter; in dm_bio_restore()
|
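The dm-bio-record.h hits above show why keeping the iterator in an embedded struct pays off: a device-mapper target can copy bi_iter by value before submitting a bio and copy it back later to retry the same I/O. A minimal sketch of that pattern, assuming only bi_iter needs saving (the real header records a few more bio fields) and using purely illustrative _sketch names:

    #include <linux/bio.h>

    struct dm_bio_details_sketch {
        struct bvec_iter bi_iter;   /* snapshot of position and remaining size */
    };

    static inline void sketch_bio_record(struct dm_bio_details_sketch *bd,
                                         struct bio *bio)
    {
        bd->bi_iter = bio->bi_iter; /* save before the bio is submitted */
    }

    static inline void sketch_bio_restore(struct dm_bio_details_sketch *bd,
                                          struct bio *bio)
    {
        bio->bi_iter = bd->bi_iter; /* rewind so the bio can be resubmitted */
    }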
D | faulty.c | 77 b->bi_iter.bi_size = bio->bi_iter.bi_size; in faulty_fail() 78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector; in faulty_fail() 188 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request() 192 add_sector(conf, bio->bi_iter.bi_sector, in make_request() 200 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request() 206 add_sector(conf, bio->bi_iter.bi_sector, in make_request() 211 add_sector(conf, bio->bi_iter.bi_sector, in make_request()
|
D | linear.c | 272 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); in linear_make_request() 278 if (unlikely(bio->bi_iter.bi_sector >= end_sector || in linear_make_request() 279 bio->bi_iter.bi_sector < start_sector)) in linear_make_request() 287 bio->bi_iter.bi_sector, in linear_make_request() 294 split->bi_iter.bi_sector = split->bi_iter.bi_sector - in linear_make_request() 311 (unsigned long long)bio->bi_iter.bi_sector, in linear_make_request()
|
D | multipath.c | 101 (unsigned long long)bio->bi_iter.bi_sector); in multipath_end_request() 135 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; in multipath_make_request() 351 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; in multipathd() 357 (unsigned long long)bio->bi_iter.bi_sector); in multipathd() 363 (unsigned long long)bio->bi_iter.bi_sector); in multipathd() 365 bio->bi_iter.bi_sector += in multipathd()
|
D | dm-stripe.c | 264 stripe_map_range_sector(sc, bio->bi_iter.bi_sector, in stripe_map_range() 270 bio->bi_iter.bi_sector = begin + in stripe_map_range() 272 bio->bi_iter.bi_size = to_bytes(end - begin); in stripe_map_range() 300 stripe_map_sector(sc, bio->bi_iter.bi_sector, in stripe_map() 301 &stripe, &bio->bi_iter.bi_sector); in stripe_map() 303 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; in stripe_map()
|
D | raid1.c | 241 sector_t bi_sector = bio->bi_iter.bi_sector; in call_bio_endio() 277 (unsigned long long) bio->bi_iter.bi_sector, in raid_end_bio_io() 477 (unsigned long long) mbio->bi_iter.bi_sector, in raid1_end_write_request() 880 <= bio->bi_iter.bi_sector)) in need_to_wait_for_sync() 917 if (bio->bi_iter.bi_sector >= in wait_barrier() 925 <= bio->bi_iter.bi_sector) in wait_barrier() 1032 bio->bi_iter.bi_size); in alloc_behind_pages() 1112 bio->bi_iter.bi_sector < mddev->suspend_hi) || in make_request() 1114 md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { in make_request() 1125 bio->bi_iter.bi_sector >= mddev->suspend_hi || in make_request() [all …]
|
D | dm-delay.c | 280 bio->bi_iter.bi_sector = dc->start_write + in delay_map() 281 dm_target_offset(ti, bio->bi_iter.bi_sector); in delay_map() 287 bio->bi_iter.bi_sector = dc->start_read + in delay_map() 288 dm_target_offset(ti, bio->bi_iter.bi_sector); in delay_map()
|
D | raid10.c | 1173 bio->bi_iter.bi_sector < conf->reshape_progress && in __make_request() 1174 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request() 1180 conf->reshape_progress <= bio->bi_iter.bi_sector || in __make_request() 1181 conf->reshape_progress >= bio->bi_iter.bi_sector + in __make_request() 1188 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in __make_request() 1189 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request() 1190 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request() 1191 bio->bi_iter.bi_sector < conf->reshape_progress))) { in __make_request() 1209 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request() 1238 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request() [all …]
|
D | dm-io.c | 223 dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in bio_dp_init() 224 dp->context_u = bio->bi_iter.bi_bvec_done; in bio_dp_init() 321 bio->bi_iter.bi_sector = where->sector + (where->count - remaining); in do_region() 328 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; in do_region() 337 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; in do_region()
|
D | dm-log-writes.c | 203 bio->bi_iter.bi_size = 0; in write_metadata() 204 bio->bi_iter.bi_sector = sector; in write_metadata() 268 bio->bi_iter.bi_size = 0; in log_one_block() 269 bio->bi_iter.bi_sector = sector; in log_one_block() 290 bio->bi_iter.bi_size = 0; in log_one_block() 291 bio->bi_iter.bi_sector = sector; in log_one_block() 601 block->sector = bio->bi_iter.bi_sector; in log_writes_map()
|
D | dm-linear.c | 88 bio->bi_iter.bi_sector = in linear_map_bio() 89 linear_map_sector(ti, bio->bi_iter.bi_sector); in linear_map_bio()
|
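The dm-linear.c hits above are the simplest remapping case in the whole listing: the map function only rewrites bi_iter.bi_sector and points the bio at the backing device. A sketch under the assumption of a hypothetical target context holding the backing device and its start sector; dm_target_offset() subtracts the target's own offset within the table:

    #include <linux/device-mapper.h>
    #include <linux/bio.h>

    struct linear_sketch_ctx {
        struct dm_dev *dev;   /* backing device */
        sector_t start;       /* first sector of the mapping on that device */
    };

    static void linear_sketch_map_bio(struct dm_target *ti, struct bio *bio)
    {
        struct linear_sketch_ctx *lc = ti->private;

        bio->bi_bdev = lc->dev->bdev;
        if (bio_sectors(bio))   /* empty (flush-only) bios keep their sector */
            bio->bi_iter.bi_sector = lc->start +
                dm_target_offset(ti, bio->bi_iter.bi_sector);
    }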
D | dm-flakey.c | 251 bio->bi_iter.bi_sector = in flakey_map_bio() 252 flakey_map_sector(ti, bio->bi_iter.bi_sector); in flakey_map_bio() 270 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); in corrupt_bio_data()
|
D | raid0.c | 501 ((bio->bi_iter.bi_sector & (chunk_sects-1)) in is_io_in_chunk_boundary() 504 sector_t sector = bio->bi_iter.bi_sector; in is_io_in_chunk_boundary() 522 sector_t sector = bio->bi_iter.bi_sector; in raid0_make_request() 531 sector = bio->bi_iter.bi_sector; in raid0_make_request() 543 split->bi_iter.bi_sector = sector + zone->dev_start + in raid0_make_request()
|
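The raid0.c hits above combine two bi_iter operations: the bio is split so that no piece crosses a chunk boundary, and each piece is then remapped to its component device. A sketch of that loop, assuming the 4.1-era bio_split()/bio_chain() helpers, a power-of-two chunk size, and placeholder dev_start/data_offset values standing in for the zone and rdev offsets the real driver computes:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void split_at_chunk_boundary(struct bio *bio, unsigned chunk_sects,
                                        sector_t dev_start, sector_t data_offset)
    {
        struct bio *split;

        do {
            sector_t sector = bio->bi_iter.bi_sector;
            unsigned sectors = chunk_sects -
                (sector & (chunk_sects - 1));   /* room left in this chunk */

            if (sectors < bio_sectors(bio)) {
                split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
                bio_chain(split, bio);          /* parent completes after child */
            } else {
                split = bio;
            }

            split->bi_iter.bi_sector = sector + dev_start + data_offset;
            generic_make_request(split);
        } while (split != bio);
    }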
D | dm-verity.c | 552 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); in verity_map() 554 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & in verity_map() 573 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); in verity_map() 574 io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; in verity_map() 578 io->iter = bio->bi_iter; in verity_map()
|
D | dm-crypt.c | 810 ctx->iter_in = bio_in->bi_iter; in crypt_convert_init() 812 ctx->iter_out = bio_out->bi_iter; in crypt_convert_init() 1011 clone->bi_iter.bi_size += len; in crypt_alloc_buffer() 1143 clone->bi_iter.bi_sector = cc->start + io->sector; in kcryptd_io_read() 1251 clone->bi_iter.bi_sector = cc->start + io->sector; in kcryptd_crypt_write_io_submit() 1290 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); in kcryptd_crypt_write_convert() 1297 io->ctx.iter_out = clone->bi_iter; in kcryptd_crypt_write_convert() 1896 bio->bi_iter.bi_sector = cc->start + in crypt_map() 1897 dm_target_offset(ti, bio->bi_iter.bi_sector); in crypt_map() 1902 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); in crypt_map()
|
D | dm-thin.c | 553 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block() 566 sector_t bi_sector = bio->bi_iter.bi_sector; in remap() 570 bio->bi_iter.bi_sector = in remap() 574 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap() 923 return bio->bi_iter.bi_size == in io_overlaps_block() 1481 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) { in process_shared_bio() 1506 if (!bio->bi_iter.bi_size) { in provision_block() 1578 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell() 1580 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell() 1629 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) { in __process_bio_read_only() [all …]
|
D | dm-raid1.c | 435 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; in mirror_available() 445 if (unlikely(!bio->bi_iter.bi_size)) in map_sector() 447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector() 453 bio->bi_iter.bi_sector = map_sector(m, bio); in map_bio() 562 m = choose_mirror(ms, bio->bi_iter.bi_sector); in do_reads() 1193 m = choose_mirror(ms, bio->bi_iter.bi_sector); in mirror_map()
|
D | dm.c | 656 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in start_io_acct() 671 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in end_io_acct() 934 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { in dec_pending() 1002 unsigned int nr_bytes = info->orig->bi_iter.bi_size; in end_clone_bio() 1421 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; in dm_accept_partial_bio() 1426 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; in dm_accept_partial_bio() 1446 sector = clone->bi_iter.bi_sector; in __map_bio() 1477 bio->bi_iter.bi_sector = sector; in bio_setup_sector() 1478 bio->bi_iter.bi_size = to_bytes(len); in bio_setup_sector() 1494 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); in clone_bio() [all …]
|
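Among the dm.c hits above, dm_accept_partial_bio() shows the other direction: instead of splitting, a target that can only service the head of a bio shrinks bi_iter.bi_size and lets device-mapper resubmit the remainder. A simplified sketch of that truncation; the real helper also validates flush and clone state:

    #include <linux/bio.h>

    static void accept_partial_bio_sketch(struct bio *bio, unsigned n_sectors)
    {
        unsigned total_sectors = bio->bi_iter.bi_size >> 9;  /* 512-byte sectors */

        BUG_ON(n_sectors > total_sectors);      /* can only shrink, never grow */
        bio->bi_iter.bi_size = n_sectors << 9;  /* keep only the leading part */
    }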
D | raid5.c | 141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio() 233 bi->bi_iter.bi_size = 0; in return_io() 1009 bi->bi_iter.bi_sector = (sh->sector in ops_run_io() 1012 bi->bi_iter.bi_sector = (sh->sector in ops_run_io() 1023 bi->bi_iter.bi_size = STRIPE_SIZE; in ops_run_io() 1061 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io() 1064 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io() 1072 rbi->bi_iter.bi_size = STRIPE_SIZE; in ops_run_io() 1115 if (bio->bi_iter.bi_sector >= sector) in async_copy_data() 1116 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data() [all …]
|
D | dm-snap.c | 1653 bio->bi_iter.bi_sector = in remap_exception() 1656 (bio->bi_iter.bi_sector & s->store->chunk_mask); in remap_exception() 1674 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_map() 1735 bio->bi_iter.bi_size == in snapshot_map() 1792 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_merge_map() 2168 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); in do_origin() 2275 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1)); in origin_map()
|
D | dm-switch.c | 322 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); in switch_map() 326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; in switch_map()
|
D | dm-cache-target.c | 719 sector_t bi_sector = bio->bi_iter.bi_sector; in remap_to_cache() 724 bio->bi_iter.bi_sector = in remap_to_cache() 728 bio->bi_iter.bi_sector = in remap_to_cache() 770 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block() 1108 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); in bio_writes_complete_block() 1120 sector_t sb = bio->bi_iter.bi_sector; in calc_discard_block_range() 1392 BUG_ON(bio->bi_iter.bi_size); in process_flush_bio()
|
D | dm-region-hash.c | 129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - in dm_rh_bio_to_region()
|
D | dm-cache-policy-mq.c | 76 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) in iot_update_stats()
|
D | dm-era-target.c | 1186 sector_t block_nr = bio->bi_iter.bi_sector; in get_block()
|
D | dm-bufio.c | 604 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; in use_inline_bio()
|
D | md.c | 436 if (bio->bi_iter.bi_size == 0) in md_submit_flush_data() 762 bio->bi_iter.bi_sector = sector; in md_super_write() 786 bio->bi_iter.bi_sector = sector + rdev->sb_start; in sync_page_io() 790 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; in sync_page_io() 792 bio->bi_iter.bi_sector = sector + rdev->data_offset; in sync_page_io()
|
/linux-4.1.27/drivers/md/bcache/ |
D | request.c | 121 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate() 130 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate() 131 bio->bi_iter.bi_size -= sectors << 9; in bch_data_invalidate() 134 &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); in bch_data_invalidate() 224 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); in bch_data_insert_start() 383 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || in check_should_bypass() 407 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) in check_should_bypass() 408 if (i->last == bio->bi_iter.bi_sector && in check_should_bypass() 417 if (i->sequential + bio->bi_iter.bi_size > i->sequential) in check_should_bypass() 418 i->sequential += bio->bi_iter.bi_size; in check_should_bypass() [all …]
|
D | debug.c | 53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in bch_btree_verify() 54 bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; in bch_btree_verify() 132 (uint64_t) bio->bi_iter.bi_sector); in bch_data_verify()
|
D | util.c | 227 size_t size = bio->bi_iter.bi_size; in bch_bio_map() 230 BUG_ON(!bio->bi_iter.bi_size); in bch_bio_map()
|
D | journal.c | 55 bio->bi_iter.bi_sector = bucket + offset; in journal_read_bucket() 58 bio->bi_iter.bi_size = len << 9; in journal_read_bucket() 452 bio->bi_iter.bi_sector = bucket_to_sector(ca->set, in do_journal_discard() 458 bio->bi_iter.bi_size = bucket_bytes(ca); in do_journal_discard() 625 bio->bi_iter.bi_sector = PTR_OFFSET(k, i); in journal_write_unlocked() 628 bio->bi_iter.bi_size = sectors << 9; in journal_write_unlocked()
|
D | io.c | 27 .bi_sector = bio->bi_iter.bi_sector, in bch_bio_max_sectors() 137 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in __bch_submit_bbio()
|
D | writeback.h | 53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, in should_writeback()
|
D | movinggc.c | 87 bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9; in moving_init() 103 io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); in write_moving()
|
D | writeback.c | 114 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; in dirty_init() 187 io->bio.bi_iter.bi_sector = KEY_START(&w->key); in write_dirty() 256 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); in read_dirty()
|
D | super.c | 237 bio->bi_iter.bi_sector = SB_SECTOR; in __write_super() 239 bio->bi_iter.bi_size = SB_SIZE; in __write_super() 375 bio->bi_iter.bi_size = KEY_SIZE(k) << 9; in uuid_io() 531 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io() 534 bio->bi_iter.bi_size = bucket_bytes(ca); in prio_io()
|
D | btree.c | 299 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; in bch_btree_node_read() 401 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c)); in do_btree_node_write()
|
/linux-4.1.27/fs/logfs/ |
D | dev_bdev.c | 30 bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); in sync_request() 31 bio.bi_iter.bi_size = PAGE_SIZE; in sync_request() 95 bio->bi_iter.bi_size = i * PAGE_SIZE; in __bdev_writeseg() 97 bio->bi_iter.bi_sector = ofs >> 9; in __bdev_writeseg() 122 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; in __bdev_writeseg() 124 bio->bi_iter.bi_sector = ofs >> 9; in __bdev_writeseg() 187 bio->bi_iter.bi_size = i * PAGE_SIZE; in do_erase() 189 bio->bi_iter.bi_sector = ofs >> 9; in do_erase() 208 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE; in do_erase() 210 bio->bi_iter.bi_sector = ofs >> 9; in do_erase()
|
/linux-4.1.27/include/trace/events/ |
D | bcache.h | 27 __entry->sector = bio->bi_iter.bi_sector; 28 __entry->orig_sector = bio->bi_iter.bi_sector - 16; 29 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 30 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); 102 __entry->sector = bio->bi_iter.bi_sector; 103 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 104 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); 137 __entry->sector = bio->bi_iter.bi_sector; 138 __entry->nr_sector = bio->bi_iter.bi_size >> 9; 139 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); [all …]
|
D | block.h | 273 __entry->sector = bio->bi_iter.bi_sector; 275 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); 310 __entry->sector = bio->bi_iter.bi_sector; 313 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); 338 __entry->sector = bio->bi_iter.bi_sector; 340 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); 405 __entry->sector = bio->bi_iter.bi_sector; 407 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size); 433 __entry->sector = bio ? bio->bi_iter.bi_sector : 0; 568 __entry->sector = bio->bi_iter.bi_sector; [all …]
|
D | f2fs.h | 757 __entry->sector = bio->bi_iter.bi_sector; 758 __entry->size = bio->bi_iter.bi_size;
|
/linux-4.1.27/block/ |
D | blk-lib.c | 111 bio->bi_iter.bi_sector = sector; in blkdev_issue_discard() 116 bio->bi_iter.bi_size = req_sects << 9; in blkdev_issue_discard() 185 bio->bi_iter.bi_sector = sector; in blkdev_issue_write_same() 195 bio->bi_iter.bi_size = max_write_same_sectors << 9; in blkdev_issue_write_same() 199 bio->bi_iter.bi_size = nr_sects << 9; in blkdev_issue_write_same() 251 bio->bi_iter.bi_sector = sector; in __blkdev_issue_zeroout()
|
D | bio.c | 568 bio->bi_iter = bio_src->bi_iter; in __bio_clone_fast() 650 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; in bio_clone_bioset() 651 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; in bio_clone_bioset() 715 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) in __bio_add_page() 738 .bi_sector = bio->bi_iter.bi_sector, in __bio_add_page() 739 .bi_size = bio->bi_iter.bi_size - in __bio_add_page() 750 bio->bi_iter.bi_size += len; in __bio_add_page() 776 bio->bi_iter.bi_size += len; in __bio_add_page() 800 .bi_sector = bio->bi_iter.bi_sector, in __bio_add_page() 801 .bi_size = bio->bi_iter.bi_size - len, in __bio_add_page() [all …]
|
D | blk-core.c | 131 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) in req_bio_endio() 1453 bio->bi_iter.bi_size = len; in blk_add_request_payload() 1477 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_back_merge() 1500 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge() 1501 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_front_merge() 1578 req->__sector = bio->bi_iter.bi_sector; in init_request_from_bio() 1702 bio->bi_iter.bi_sector += p->start_sect; in blk_partition_remap() 1707 bio->bi_iter.bi_sector - p->start_sect); in blk_partition_remap() 1773 sector_t sector = bio->bi_iter.bi_sector; in bio_check_eod() 1809 (long long) bio->bi_iter.bi_sector); in generic_make_request_checks() [all …]
|
D | blk-map.c | 23 rq->__data_len += bio->bi_iter.bi_size; in blk_rq_append_bio() 99 if (bio->bi_iter.bi_size != iter->count) { in blk_rq_map_user_iov()
|
D | blk-throttle.c | 880 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { in tg_with_in_bps_limit() 887 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; in tg_with_in_bps_limit() 990 tg->bytes_disp[rw] += bio->bi_iter.bi_size; in throtl_charge_bio() 1007 bio->bi_iter.bi_size, bio->bi_rw); in throtl_charge_bio() 1505 bio->bi_iter.bi_size, bio->bi_rw); in blk_throtl_bio() 1561 tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw], in blk_throtl_bio()
|
D | blk-merge.c | 605 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge() 607 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
|
D | blk-flush.c | 483 *error_sector = bio->bi_iter.bi_sector; in blkdev_issue_flush()
|
D | bio-integrity.c | 299 bip_set_seed(bip, bio->bi_iter.bi_sector); in bio_integrity_prep()
|
D | elevator.c | 443 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
|
/linux-4.1.27/include/linux/ |
D | bio.h | 93 #define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter) 94 #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) 95 #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) 98 ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len) 99 #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9) 100 #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) 108 bio->bi_iter.bi_size && in bio_has_data() 139 return bio->bi_iter.bi_size; in bio_cur_bytes() 252 __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) 528 __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
|
D | blk_types.h | 54 struct bvec_iter bi_iter; member
|
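The blk_types.h hit above is the member every row in this listing touches. A sketch of the struct behind it, roughly as it looks in this kernel generation (field comments paraphrase the upstream ones), together with the two derived accessors quoted from bio.h above:

    #include <linux/types.h>

    struct bvec_iter_sketch {
        sector_t     bi_sector;     /* device address, in 512-byte sectors */
        unsigned int bi_size;       /* residual I/O count, in bytes */
        unsigned int bi_idx;        /* current index into bi_io_vec[] */
        unsigned int bi_bvec_done;  /* bytes completed in the current bvec */
    };

    /* accessors as quoted from include/linux/bio.h above */
    #define bio_sectors_sketch(bio)    ((bio)->bi_iter.bi_size >> 9)
    #define bio_end_sector_sketch(bio) ((bio)->bi_iter.bi_sector + \
                                        bio_sectors_sketch(bio))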
/linux-4.1.27/drivers/s390/block/ |
D | xpram.c | 193 if ((bio->bi_iter.bi_sector & 7) != 0 || in xpram_make_request() 194 (bio->bi_iter.bi_size & 4095) != 0) in xpram_make_request() 197 if ((bio->bi_iter.bi_size >> 12) > xdev->size) in xpram_make_request() 200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) in xpram_make_request() 202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; in xpram_make_request()
|
D | dcssblk.c | 833 if ((bio->bi_iter.bi_sector & 7) != 0 || in dcssblk_make_request() 834 (bio->bi_iter.bi_size & 4095) != 0) in dcssblk_make_request() 857 index = (bio->bi_iter.bi_sector >> 3); in dcssblk_make_request()
|
/linux-4.1.27/mm/ |
D | page_io.c | 34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); in get_swap_bio() 35 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; in get_swap_bio() 40 bio->bi_iter.bi_size = PAGE_SIZE; in get_swap_bio() 65 (unsigned long long)bio->bi_iter.bi_sector); in end_swap_bio_write() 83 (unsigned long long)bio->bi_iter.bi_sector); in end_swap_bio_read()
|
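The page_io.c hits above are a compact example of the unit conversion most callers need: swap slots are addressed in page-sized blocks, while bi_sector is in 512-byte sectors. A short sketch of that setup step, with the swap_block parameter purely illustrative:

    #include <linux/bio.h>
    #include <linux/mm.h>

    static void swap_bio_setup_sketch(struct bio *bio, sector_t swap_block)
    {
        bio->bi_iter.bi_sector = swap_block << (PAGE_SHIFT - 9); /* pages -> sectors */
        bio->bi_iter.bi_size = PAGE_SIZE;                        /* one page of I/O */
    }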
/linux-4.1.27/drivers/block/ |
D | pktcdvd.c | 654 if (s <= tmp->bio->bi_iter.bi_sector) in pkt_rbtree_find() 663 if (s > tmp->bio->bi_iter.bi_sector) { in pkt_rbtree_find() 668 BUG_ON(s > tmp->bio->bi_iter.bi_sector); in pkt_rbtree_find() 679 sector_t s = node->bio->bi_iter.bi_sector; in pkt_rbtree_insert() 685 if (s < tmp->bio->bi_iter.bi_sector) in pkt_rbtree_insert() 864 if (bio && (bio->bi_iter.bi_sector == in pkt_iosched_process_queue() 897 bio->bi_iter.bi_size >> 10; in pkt_iosched_process_queue() 987 (unsigned long long)bio->bi_iter.bi_sector, err); in pkt_end_io_read() 1035 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / in pkt_gather_data() 1037 int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE; in pkt_gather_data() [all …]
|
D | umem.c | 352 card->current_iter = card->bio->bi_iter; in add_bio() 450 page->iter = page->bio->bi_iter; in process_page() 531 (unsigned long long)bio->bi_iter.bi_sector, in mm_make_request() 532 bio->bi_iter.bi_size); in mm_make_request()
|
D | brd.c | 336 sector = bio->bi_iter.bi_sector; in brd_make_request() 342 discard_from_brd(brd, sector, bio->bi_iter.bi_size); in brd_make_request()
|
D | pmem.c | 78 sector = bio->bi_iter.bi_sector; in pmem_make_request()
|
D | ps3disk.c | 106 iter.bio->bi_iter.bi_sector); in ps3disk_scatter_gather()
|
D | rbd.c | 1321 bio->bi_iter.bi_size = len; in bio_clone_range() 1352 if (!bi || off >= bi->bi_iter.bi_size || !len) in bio_chain_clone_range() 1364 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len); in bio_chain_clone_range() 1373 if (off == bi->bi_iter.bi_size) { in bio_chain_clone_range() 2447 bio_list->bi_iter.bi_sector << SECTOR_SHIFT); in rbd_img_request_fill()
|
D | ps3vram.c | 556 loff_t offset = bio->bi_iter.bi_sector << 9; in ps3vram_do_bio()
|
D | floppy.c | 3812 bio.bi_iter.bi_size = size; in __floppy_read_block_0() 3814 bio.bi_iter.bi_sector = 0; in __floppy_read_block_0()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | lloop.c | 219 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; in do_bio_lustrebacked() 312 (unsigned long long)(*bio)->bi_iter.bi_sector, in loop_get_bio() 313 (*bio)->bi_iter.bi_size, in loop_get_bio() 347 (unsigned long long)old_bio->bi_iter.bi_sector, in loop_make_request() 348 old_bio->bi_iter.bi_size); in loop_make_request() 368 cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size); in loop_make_request() 379 cfs_bio_endio(bio, bio->bi_iter.bi_size, ret); in loop_handle_bio()
|
/linux-4.1.27/drivers/block/rsxx/ |
D | dev.c | 169 if (bio->bi_iter.bi_size == 0) { in rsxx_make_request() 190 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size); in rsxx_make_request()
|
D | dma.c | 700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ in rsxx_dma_queue_bio() 709 bv_len = bio->bi_iter.bi_size; in rsxx_dma_queue_bio()
|
/linux-4.1.27/fs/btrfs/ |
D | file-item.c | 185 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; in __btrfs_lookup_bio_sums() 204 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) in __btrfs_lookup_bio_sums() 220 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; in __btrfs_lookup_bio_sums() 440 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size), in btrfs_csum_one_bio() 445 sums->len = bio->bi_iter.bi_size; in btrfs_csum_one_bio() 455 sums->bytenr = (u64)bio->bi_iter.bi_sector << 9; in btrfs_csum_one_bio() 470 bytes_left = bio->bi_iter.bi_size - total_bytes; in btrfs_csum_one_bio() 478 sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) + in btrfs_csum_one_bio()
|
D | compression.c | 174 (u64)bio->bi_iter.bi_sector << 9); in end_compressed_bio_read() 378 if (bio->bi_iter.bi_size) in btrfs_submit_compressed_write() 514 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { in add_ra_bio_pages() 581 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; in btrfs_submit_compressed_read() 666 if (comp_bio->bi_iter.bi_size) in btrfs_submit_compressed_read() 695 sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size, in btrfs_submit_compressed_read()
|
D | raid56.c | 1066 last_end = (u64)last->bi_iter.bi_sector << 9; in rbio_add_io_page() 1067 last_end += last->bi_iter.bi_size; in rbio_add_io_page() 1087 bio->bi_iter.bi_size = 0; in rbio_add_io_page() 1089 bio->bi_iter.bi_sector = disk_start >> 9; in rbio_add_io_page() 1145 start = (u64)bio->bi_iter.bi_sector << 9; in index_rbio_pages() 1332 u64 physical = bio->bi_iter.bi_sector; in find_bio_stripe() 1359 u64 logical = bio->bi_iter.bi_sector; in find_logical_bio_stripe() 1662 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; in plug_cmp() 1663 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; in plug_cmp() 1753 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_write() [all …]
|
D | scrub.c | 1455 bio->bi_iter.bi_sector = page->logical >> 9; in scrub_submit_raid56_bio_wait() 1514 bio->bi_iter.bi_sector = page->physical >> 9; in scrub_recheck_block() 1642 bio->bi_iter.bi_sector = page_bad->physical >> 9; in scrub_repair_page_from_good_copy() 1742 bio->bi_iter.bi_sector = sbio->physical >> 9; in scrub_add_page_to_wr_bio() 2149 bio->bi_iter.bi_sector = sbio->physical >> 9; in scrub_add_page_to_rd_bio() 2704 bio->bi_iter.bi_sector = sparity->logic_start >> 9; in scrub_parity_check_and_repair() 4212 bio->bi_iter.bi_size = 0; in write_page_nocow() 4213 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; in write_page_nocow()
|
D | extent_io.c | 2048 bio->bi_iter.bi_size = 0; in repair_io_failure() 2059 bio->bi_iter.bi_sector = sector; in repair_io_failure() 2367 bio->bi_iter.bi_sector = failrec->logical >> 9; in btrfs_create_repair_bio() 2369 bio->bi_iter.bi_size = 0; in btrfs_create_repair_bio() 2570 "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err, in end_bio_extent_readpage() 2708 bio->bi_iter.bi_sector = first_sector; in btrfs_bio_alloc() 2812 contig = bio->bi_iter.bi_sector == sector; in submit_extent_page()
|
D | volumes.c | 5742 bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len; in bio_size_ok() 5757 bio->bi_iter.bi_sector = physical >> 9; in submit_stripe_bio() 5766 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, in submit_stripe_bio() 5767 name->str, dev->devid, bio->bi_iter.bi_size); in submit_stripe_bio() 5798 u64 len = bio->bi_iter.bi_size; in breakup_stripe_bio() 5823 bio->bi_iter.bi_sector = logical >> 9; in bbio_error() 5834 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_map_bio() 5842 length = bio->bi_iter.bi_size; in btrfs_map_bio()
|
D | inode.c | 1798 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_merge_bio_hook() 1806 length = bio->bi_iter.bi_size; in btrfs_merge_bio_hook() 7978 (unsigned long long)bio->bi_iter.bi_sector, in btrfs_end_dio_bio() 7979 bio->bi_iter.bi_size, err); in btrfs_end_dio_bio() 8107 u64 start_sector = orig_bio->bi_iter.bi_sector; in btrfs_submit_direct_hook() 8115 map_length = orig_bio->bi_iter.bi_size; in btrfs_submit_direct_hook() 8121 if (map_length >= orig_bio->bi_iter.bi_size) { in btrfs_submit_direct_hook() 8176 map_length = orig_bio->bi_iter.bi_size; in btrfs_submit_direct_hook() 8240 dip->bytes = dio_bio->bi_iter.bi_size; in btrfs_submit_direct() 8241 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; in btrfs_submit_direct()
|
D | check-integrity.c | 1685 bio->bi_iter.bi_sector = dev_bytenr >> 9; in btrfsic_read_block() 2982 dev_bytenr = 512 * bio->bi_iter.bi_sector; in __btrfsic_submit_bio() 2990 (unsigned long long)bio->bi_iter.bi_sector, in __btrfsic_submit_bio()
|
/linux-4.1.27/kernel/power/ |
D | block_io.c | 35 bio->bi_iter.bi_sector = sector; in submit()
|
/linux-4.1.27/fs/jfs/ |
D | jfs_metapage.c | 412 if (!bio->bi_iter.bi_size) in metapage_writepage() 434 bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); in metapage_writepage() 448 if (!bio->bi_iter.bi_size) in metapage_writepage() 513 bio->bi_iter.bi_sector = in metapage_readpage()
|
D | jfs_logmgr.c | 2000 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmRead() 2007 bio->bi_iter.bi_size = LOGPSIZE; in lbmRead() 2013 bio->bi_iter.bi_size = 0; in lbmRead() 2146 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmStartIO() 2153 bio->bi_iter.bi_size = LOGPSIZE; in lbmStartIO() 2160 bio->bi_iter.bi_size = 0; in lbmStartIO()
|
/linux-4.1.27/drivers/block/zram/ |
D | zram_drv.c | 743 size_t n = bio->bi_iter.bi_size; in zram_bio_discard() 929 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; in __zram_make_request() 930 offset = (bio->bi_iter.bi_sector & in __zram_make_request() 986 if (!valid_io_request(zram, bio->bi_iter.bi_sector, in zram_make_request() 987 bio->bi_iter.bi_size)) { in zram_make_request()
|
/linux-4.1.27/kernel/trace/ |
D | blktrace.c | 786 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, in blk_add_trace_bio() 890 __blk_add_trace(bt, bio->bi_iter.bi_sector, in blk_add_trace_split() 891 bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT, in blk_add_trace_split() 924 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, in blk_add_trace_bio_remap()
|
/linux-4.1.27/fs/ext4/ |
D | page-io.c | 316 sector_t bi_sector = bio->bi_iter.bi_sector; in ext4_end_bio() 385 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in io_submit_init_bio()
|
D | readpage.c | 294 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); in ext4_mpage_readpages()
|
D | crypto.c | 524 bio->bi_iter.bi_sector = pblk; in ext4_encrypted_zeroout()
|
/linux-4.1.27/arch/m68k/emu/ |
D | nfblock.c | 68 sector_t sec = bio->bi_iter.bi_sector; in nfhd_make_request()
|
/linux-4.1.27/fs/nfs/blocklayout/ |
D | blocklayout.c | 110 rw == READ ? "read" : "write", bio->bi_iter.bi_size, in bl_submit_bio() 111 (unsigned long long)bio->bi_iter.bi_sector); in bl_submit_bio() 131 bio->bi_iter.bi_sector = disk_sector; in bl_alloc_init_bio()
|
/linux-4.1.27/fs/hfsplus/ |
D | wrapper.c | 66 bio->bi_iter.bi_sector = sector; in hfsplus_submit_bio()
|
/linux-4.1.27/fs/ |
D | buffer.c | 2979 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod() 2982 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod() 2983 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) in guard_bio_eod() 2987 truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9); in guard_bio_eod() 2990 bio->bi_iter.bi_size -= truncated_bytes; in guard_bio_eod() 3023 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in _submit_bh() 3030 bio->bi_iter.bi_size = bh->b_size; in _submit_bh()
|
D | direct-io.c | 368 bio->bi_iter.bi_sector = first_sector; in dio_bio_alloc() 711 sdio->bio->bi_iter.bi_size; in dio_send_cur_page()
|
D | mpage.c | 82 bio->bi_iter.bi_sector = first_sector; in mpage_alloc()
|
/linux-4.1.27/arch/powerpc/sysdev/ |
D | axonram.c | 116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << in axon_ram_make_request()
|
/linux-4.1.27/drivers/target/ |
D | target_core_iblock.c | 357 bio->bi_iter.bi_sector = lba; in iblock_get_bio() 653 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; in iblock_alloc_bip()
|
/linux-4.1.27/arch/xtensa/platforms/iss/ |
D | simdisk.c | 108 sector_t sector = bio->bi_iter.bi_sector; in simdisk_xfer_bio()
|
/linux-4.1.27/drivers/block/drbd/ |
D | drbd_req.c | 67 req->i.sector = bio_src->bi_iter.bi_sector; in drbd_req_new() 68 req->i.size = bio_src->bi_iter.bi_size; in drbd_req_new() 1505 D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512)); in drbd_make_request()
|
D | drbd_actlog.c | 156 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io()
|
D | drbd_receiver.c | 1418 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request() 1429 bio->bi_iter.bi_size = data_size; in drbd_submit_peer_request() 1443 len, (uint64_t)bio->bi_iter.bi_sector); in drbd_submit_peer_request() 1729 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); in recv_dless_read()
|
D | drbd_bitmap.c | 1025 bio->bi_iter.bi_sector = on_disk_sector; in bm_page_io_async()
|
/linux-4.1.27/fs/nilfs2/ |
D | segbuf.c | 419 bio->bi_iter.bi_sector = in nilfs_alloc_seg_bio()
|
/linux-4.1.27/Documentation/block/ |
D | biovecs.txt | 37 wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
|
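The biovecs.txt hit above refers to bio_advance(), the wrapper that advances the bio's own bi_iter as parts of the I/O complete, so bi_sector/bi_size always describe what is still outstanding. A hedged usage sketch with an illustrative helper name; the two-argument bio_endio() matches this kernel generation:

    #include <linux/bio.h>

    static void complete_bytes_sketch(struct bio *bio, unsigned done_bytes)
    {
        bio_advance(bio, done_bytes);     /* moves bio->bi_iter forward */

        if (!bio->bi_iter.bi_size)        /* nothing left outstanding */
            bio_endio(bio, 0);            /* 4.1-era two-argument form */
    }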
D | biodoc.txt | 438 struct bvec_iter bi_iter; /* current index into bio_vec array */ 464 - Drivers which can't process a large bio in one shot can use the bi_iter 573 buffer, bio, bio->bi_iter fields too.
|
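The biodoc.txt hit above describes the other common pattern: a driver that cannot take a large bio in one shot walks it segment by segment with its own copy of the iterator, leaving bio->bi_iter untouched for the completion path. A sketch using the bio_for_each_segment() macro quoted in the bio.h row above; process_segment() is a hypothetical stand-in for per-segment work:

    #include <linux/bio.h>

    /* Hypothetical per-segment work; a real driver would program hardware here. */
    static void process_segment(struct page *page, unsigned offset, unsigned len,
                                sector_t sector)
    {
    }

    static void walk_bio_sketch(struct bio *bio)
    {
        struct bio_vec bvec;
        struct bvec_iter iter;

        bio_for_each_segment(bvec, bio, iter) {
            /* iter.bi_sector tracks the device sector of this segment */
            process_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len,
                            iter.bi_sector);
        }
    }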
/linux-4.1.27/fs/xfs/ |
D | xfs_buf.c | 1172 bio->bi_iter.bi_sector = sector; in xfs_buf_ioapply_map() 1194 if (likely(bio->bi_iter.bi_size)) { in xfs_buf_ioapply_map()
|
D | xfs_aops.c | 390 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in xfs_alloc_ioend_bio()
|
/linux-4.1.27/fs/gfs2/ |
D | lops.c | 276 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); in gfs2_log_alloc_bio()
|
D | ops_fstype.c | 243 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); in gfs2_read_super()
|
/linux-4.1.27/drivers/block/aoe/ |
D | aoecmd.c | 901 buf->iter = bio->bi_iter; in bufinit() 1114 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size)); in aoe_end_request()
|
/linux-4.1.27/drivers/block/xen-blkback/ |
D | blkback.c | 1337 bio->bi_iter.bi_sector = preq.sector_number; in dispatch_rw_block_io()
|
/linux-4.1.27/net/ceph/ |
D | messenger.c | 841 cursor->bvec_iter = bio->bi_iter; in ceph_msg_data_bio_cursor_init() 902 cursor->bvec_iter = bio->bi_iter; in ceph_msg_data_bio_advance()
|
/linux-4.1.27/fs/f2fs/ |
D | data.c | 87 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); in __bio_alloc()
|
/linux-4.1.27/fs/ocfs2/cluster/ |
D | heartbeat.c | 416 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); in o2hb_setup_one_bio()
|
/linux-4.1.27/drivers/scsi/osd/ |
D | osd_initiator.c | 734 or->in.total_bytes = bio->bi_iter.bi_size; in _osd_req_list_objects()
|