/linux-4.1.27/drivers/md/ |
D | dm-linear.c | 75 static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector) in linear_map_sector() argument 79 return lc->start + dm_target_offset(ti, bi_sector); in linear_map_sector() 88 bio->bi_iter.bi_sector = in linear_map_bio() 89 linear_map_sector(ti, bio->bi_iter.bi_sector); in linear_map_bio() 143 bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector); in linear_merge()
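The dm-linear hits above show the whole remap: linear_map_sector() adds the target's start on the backing device to dm_target_offset(), which is effectively the bio sector minus the target's begin sector in the device-mapper table. A minimal userspace sketch of that arithmetic; "dev_start" and "target_begin" are hypothetical stand-ins for lc->start and ti->begin.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static sector_t linear_map_sector(sector_t dev_start, sector_t target_begin,
                                  sector_t bi_sector)
{
        /* offset within the target, shifted onto the backing device */
        return dev_start + (bi_sector - target_begin);
}

int main(void)
{
        /* bio at sector 4096 of a target that begins at table sector 2048,
           backed by a device region starting at sector 1000000 */
        printf("%llu\n",
               (unsigned long long)linear_map_sector(1000000, 2048, 4096));
        return 0;
}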
|
D | linear.c | 69 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in linear_mergeable_bvec() 78 bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors; in linear_mergeable_bvec() 272 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); in linear_make_request() 278 if (unlikely(bio->bi_iter.bi_sector >= end_sector || in linear_make_request() 279 bio->bi_iter.bi_sector < start_sector)) in linear_make_request() 287 bio->bi_iter.bi_sector, in linear_make_request() 294 split->bi_iter.bi_sector = split->bi_iter.bi_sector - in linear_make_request() 311 (unsigned long long)bio->bi_iter.bi_sector, in linear_make_request()
|
D | faulty.c | 78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector; in faulty_fail() 188 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request() 192 add_sector(conf, bio->bi_iter.bi_sector, in make_request() 200 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request() 206 add_sector(conf, bio->bi_iter.bi_sector, in make_request() 211 add_sector(conf, bio->bi_iter.bi_sector, in make_request()
|
D | dm-flakey.c | 238 static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) in flakey_map_sector() argument 242 return fc->start + dm_target_offset(ti, bi_sector); in flakey_map_sector() 251 bio->bi_iter.bi_sector = in flakey_map_bio() 252 flakey_map_sector(ti, bio->bi_iter.bi_sector); in flakey_map_bio() 270 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); in corrupt_bio_data() 400 bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector); in flakey_merge()
|
D | dm-stripe.c | 264 stripe_map_range_sector(sc, bio->bi_iter.bi_sector, in stripe_map_range() 270 bio->bi_iter.bi_sector = begin + in stripe_map_range() 300 stripe_map_sector(sc, bio->bi_iter.bi_sector, in stripe_map() 301 &stripe, &bio->bi_iter.bi_sector); in stripe_map() 303 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; in stripe_map() 419 sector_t bvm_sector = bvm->bi_sector; in stripe_merge() 430 bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector; in stripe_merge()
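dm-stripe does the same remap in two steps: stripe_map_sector() turns the target-relative sector into a stripe index plus a sector within that stripe, and stripe_map() (line 303 above) then adds that stripe's physical_start. A sketch of the fast path only, assuming both chunk size and stripe count are powers of two (the kernel also has a sector_div() fallback for other sizes).

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static void stripe_map_sector(sector_t offset, unsigned chunk_shift,
                              unsigned stripes, unsigned *stripe,
                              sector_t *result)
{
        sector_t chunk = offset >> chunk_shift;              /* which chunk */
        sector_t in_chunk = offset & ((1u << chunk_shift) - 1);

        *stripe = chunk % stripes;                           /* which leg */
        /* chunk index on that leg, back in sectors, plus offset in chunk */
        *result = ((chunk / stripes) << chunk_shift) + in_chunk;
}

int main(void)
{
        unsigned stripe;
        sector_t mapped;

        stripe_map_sector(300, 7 /* 128-sector (64 KiB) chunks */, 4,
                          &stripe, &mapped);
        printf("stripe %u, sector %llu\n", stripe, (unsigned long long)mapped);
        return 0;
}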
|
D | multipath.c | 101 (unsigned long long)bio->bi_iter.bi_sector); in multipath_end_request() 135 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; in multipath_make_request() 351 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; in multipathd() 357 (unsigned long long)bio->bi_iter.bi_sector); in multipathd() 363 (unsigned long long)bio->bi_iter.bi_sector); in multipathd() 365 bio->bi_iter.bi_sector += in multipathd()
|
D | raid1.c | 70 sector_t bi_sector); 241 sector_t bi_sector = bio->bi_iter.bi_sector; in call_bio_endio() local 265 allow_barrier(conf, start_next_window, bi_sector); in call_bio_endio() 277 (unsigned long long) bio->bi_iter.bi_sector, in raid_end_bio_io() 477 (unsigned long long) mbio->bi_iter.bi_sector, in raid1_end_write_request() 716 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in raid1_mergeable_bvec() 729 bvm->bi_sector = sector + in raid1_mergeable_bvec() 880 <= bio->bi_iter.bi_sector)) in need_to_wait_for_sync() 917 if (bio->bi_iter.bi_sector >= in wait_barrier() 925 <= bio->bi_iter.bi_sector) in wait_barrier() [all …]
|
D | dm-delay.c | 280 bio->bi_iter.bi_sector = dc->start_write + in delay_map() 281 dm_target_offset(ti, bio->bi_iter.bi_sector); in delay_map() 287 bio->bi_iter.bi_sector = dc->start_read + in delay_map() 288 dm_target_offset(ti, bio->bi_iter.bi_sector); in delay_map()
|
D | dm-stats.c | 496 sector_t bi_sector, sector_t end_sector, in __dm_stat_bio() argument 503 if (end_sector <= s->start || bi_sector >= s->end) in __dm_stat_bio() 505 if (unlikely(bi_sector < s->start)) { in __dm_stat_bio() 509 rel_sector = bi_sector - s->start; in __dm_stat_bio() 510 todo = end_sector - bi_sector; in __dm_stat_bio() 534 sector_t bi_sector, unsigned bi_sectors, bool end, in dm_stats_account_io() argument 544 end_sector = bi_sector + bi_sectors; in dm_stats_account_io() 553 (bi_sector == (ACCESS_ONCE(last->last_sector) && in dm_stats_account_io() 564 __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration, stats_aux); in dm_stats_account_io()
|
D | raid0.c | 353 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in raid0_mergeable_bvec() 385 bvm->bi_sector = sector_offset + zone->dev_start + in raid0_mergeable_bvec() 501 ((bio->bi_iter.bi_sector & (chunk_sects-1)) in is_io_in_chunk_boundary() 504 sector_t sector = bio->bi_iter.bi_sector; in is_io_in_chunk_boundary() 522 sector_t sector = bio->bi_iter.bi_sector; in raid0_make_request() 531 sector = bio->bi_iter.bi_sector; in raid0_make_request() 543 split->bi_iter.bi_sector = sector + zone->dev_start + in raid0_make_request()
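A sketch of the boundary test visible in is_io_in_chunk_boundary() above: with a power-of-two chunk size, a bio stays inside one chunk iff the sectors already consumed in that chunk plus the bio's length fit in chunk_sects (raid0_make_request() splits the bio otherwise).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static bool io_in_chunk_boundary(sector_t bi_sector, unsigned bio_sectors,
                                 unsigned chunk_sects)
{
        /* sectors already used in this chunk + length of the bio */
        return (bi_sector & (chunk_sects - 1)) + bio_sectors <= chunk_sects;
}

int main(void)
{
        /* an 8-sector bio starting 4 sectors into a 128-sector chunk fits */
        printf("%d\n", io_in_chunk_boundary(4, 8, 128));
        /* the same bio starting at sector 124 would cross the boundary */
        printf("%d\n", io_in_chunk_boundary(124, 8, 128));
        return 0;
}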
|
D | raid10.c | 690 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); in raid10_mergeable_bvec() 737 bvm->bi_sector = r10_bio->devs[s].addr in raid10_mergeable_bvec() 749 bvm->bi_sector = r10_bio->devs[s].addr in raid10_mergeable_bvec() 1173 bio->bi_iter.bi_sector < conf->reshape_progress && in __make_request() 1174 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request() 1180 conf->reshape_progress <= bio->bi_iter.bi_sector || in __make_request() 1181 conf->reshape_progress >= bio->bi_iter.bi_sector + in __make_request() 1188 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in __make_request() 1189 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request() 1190 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request() [all …]
|
D | dm-verity.c | 163 static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector) in verity_map_sector() argument 165 return v->data_start + dm_target_offset(v->ti, bi_sector); in verity_map_sector() 552 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); in verity_map() 554 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & in verity_map() 573 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); in verity_map() 661 bvm->bi_sector = verity_map_sector(v, bvm->bi_sector); in verity_merge()
|
D | dm-stats.h | 32 sector_t bi_sector, unsigned bi_sectors, bool end,
|
D | dm-log-writes.c | 204 bio->bi_iter.bi_sector = sector; in write_metadata() 269 bio->bi_iter.bi_sector = sector; in log_one_block() 291 bio->bi_iter.bi_sector = sector; in log_one_block() 601 block->sector = bio->bi_iter.bi_sector; in log_writes_map() 741 bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector); in log_writes_merge()
|
D | dm-thin.c | 553 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block() 566 sector_t bi_sector = bio->bi_iter.bi_sector; in remap() local 570 bio->bi_iter.bi_sector = in remap() 572 (bi_sector & (pool->sectors_per_block - 1)); in remap() 574 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap() 575 sector_div(bi_sector, pool->sectors_per_block); in remap() 1578 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell() 1580 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell() 1716 sector_t bi_sector = bio->bi_iter.bi_sector; in __thin_bio_rb_add() local 1724 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector) in __thin_bio_rb_add() [all …]
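The dm-thin hits show two arithmetic paths: get_bio_block() derives the block number by a shift or by sector_div(), and remap() rebuilds bi_sector from the mapped block plus the original offset within the block. A sketch of that remap arithmetic; sector_div() in the kernel returns the remainder, and the plain modulo below stands in for it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
typedef uint64_t dm_block_t;

static sector_t remap_sector(sector_t bi_sector, dm_block_t block,
                             sector_t sectors_per_block, bool power_of_two)
{
        if (power_of_two)
                return (block * sectors_per_block) +
                       (bi_sector & (sectors_per_block - 1));
        /* non power-of-two block size: keep the remainder within the block */
        return (block * sectors_per_block) + (bi_sector % sectors_per_block);
}

int main(void)
{
        /* sector 1030 sits 6 sectors into its 128-sector block; the mapping
           sends it to block 7 of the data device */
        printf("%llu\n", (unsigned long long)remap_sector(1030, 7, 128, true));
        return 0;
}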
|
D | raid5.c | 141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio() 1009 bi->bi_iter.bi_sector = (sh->sector in ops_run_io() 1012 bi->bi_iter.bi_sector = (sh->sector in ops_run_io() 1061 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io() 1064 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io() 1115 if (bio->bi_iter.bi_sector >= sector) in async_copy_data() 1116 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data() 1118 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; in async_copy_data() 1190 while (rbi && rbi->bi_iter.bi_sector < in ops_complete_biofill() 1227 while (rbi && rbi->bi_iter.bi_sector < in ops_run_biofill() [all …]
|
D | dm-raid1.c | 435 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; in mirror_available() 447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector() 453 bio->bi_iter.bi_sector = map_sector(m, bio); in map_bio() 562 m = choose_mirror(ms, bio->bi_iter.bi_sector); in do_reads() 1193 m = choose_mirror(ms, bio->bi_iter.bi_sector); in mirror_map()
|
D | dm-crypt.c | 1143 clone->bi_iter.bi_sector = cc->start + io->sector; in kcryptd_io_read() 1251 clone->bi_iter.bi_sector = cc->start + io->sector; in kcryptd_crypt_write_io_submit() 1896 bio->bi_iter.bi_sector = cc->start + in crypt_map() 1897 dm_target_offset(ti, bio->bi_iter.bi_sector); in crypt_map() 1902 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); in crypt_map() 2031 bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector); in crypt_merge()
|
D | dm-switch.c | 322 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); in switch_map() 326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; in switch_map()
|
D | dm-snap.c | 1653 bio->bi_iter.bi_sector = in remap_exception() 1656 (bio->bi_iter.bi_sector & s->store->chunk_mask); in remap_exception() 1674 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_map() 1792 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_merge_map() 2168 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); in do_origin() 2275 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1)); in origin_map()
|
D | dm.c | 656 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in start_io_acct() 671 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in end_io_acct() 1446 sector = clone->bi_iter.bi_sector; in __map_bio() 1477 bio->bi_iter.bi_sector = sector; in bio_setup_sector() 1494 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); in clone_bio() 1693 ci.sector = bio->bi_iter.bi_sector; in __split_and_process_bio() 1729 ti = dm_table_find_target(map, bvm->bi_sector); in dm_merge_bvec() 1736 max_sectors = min(max_io_len(bvm->bi_sector, ti), in dm_merge_bvec()
|
D | dm-cache-target.c | 719 sector_t bi_sector = bio->bi_iter.bi_sector; in remap_to_cache() local 724 bio->bi_iter.bi_sector = in remap_to_cache() 726 sector_div(bi_sector, cache->sectors_per_block); in remap_to_cache() 728 bio->bi_iter.bi_sector = in remap_to_cache() 730 (bi_sector & (cache->sectors_per_block - 1)); in remap_to_cache() 770 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block() 1120 sector_t sb = bio->bi_iter.bi_sector; in calc_discard_block_range()
|
D | dm-io.c | 321 bio->bi_iter.bi_sector = where->sector + (where->count - remaining); in do_region()
|
D | dm-region-hash.c | 129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - in dm_rh_bio_to_region()
|
D | dm-cache-policy-mq.c | 76 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) in iot_update_stats()
|
D | dm-era-target.c | 1186 sector_t block_nr = bio->bi_iter.bi_sector; in get_block()
|
D | dm-bufio.c | 604 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; in use_inline_bio()
|
D | md.c | 762 bio->bi_iter.bi_sector = sector; in md_super_write() 786 bio->bi_iter.bi_sector = sector + rdev->sb_start; in sync_page_io() 790 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; in sync_page_io() 792 bio->bi_iter.bi_sector = sector + rdev->data_offset; in sync_page_io()
|
/linux-4.1.27/drivers/md/bcache/ |
D | request.c | 121 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate() 130 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate() 134 &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); in bch_data_invalidate() 224 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); in bch_data_insert_start() 383 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || in check_should_bypass() 407 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) in check_should_bypass() 408 if (i->last == bio->bi_iter.bi_sector && in check_should_bypass() 509 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) in cache_lookup_fn() 513 KEY_START(k) > bio->bi_iter.bi_sector) { in cache_lookup_fn() 517 KEY_START(k) - bio->bi_iter.bi_sector) in cache_lookup_fn() [all …]
|
D | io.c | 27 .bi_sector = bio->bi_iter.bi_sector, in bch_bio_max_sectors() 137 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in __bch_submit_bbio()
|
D | debug.c | 53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in bch_btree_verify() 132 (uint64_t) bio->bi_iter.bi_sector); in bch_data_verify()
|
D | writeback.h | 53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, in should_writeback()
|
D | writeback.c | 187 io->bio.bi_iter.bi_sector = KEY_START(&w->key); in write_dirty() 256 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); in read_dirty()
|
D | journal.c | 55 bio->bi_iter.bi_sector = bucket + offset; in journal_read_bucket() 452 bio->bi_iter.bi_sector = bucket_to_sector(ca->set, in do_journal_discard() 625 bio->bi_iter.bi_sector = PTR_OFFSET(k, i); in journal_write_unlocked()
|
D | movinggc.c | 103 io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); in write_moving()
|
D | super.c | 237 bio->bi_iter.bi_sector = SB_SECTOR; in __write_super() 531 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
|
/linux-4.1.27/fs/logfs/ |
D | dev_bdev.c | 30 bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); in sync_request() 97 bio->bi_iter.bi_sector = ofs >> 9; in __bdev_writeseg() 124 bio->bi_iter.bi_sector = ofs >> 9; in __bdev_writeseg() 189 bio->bi_iter.bi_sector = ofs >> 9; in do_erase() 210 bio->bi_iter.bi_sector = ofs >> 9; in do_erase()
|
/linux-4.1.27/include/trace/events/ |
D | block.h | 273 __entry->sector = bio->bi_iter.bi_sector; 310 __entry->sector = bio->bi_iter.bi_sector; 338 __entry->sector = bio->bi_iter.bi_sector; 405 __entry->sector = bio->bi_iter.bi_sector; 433 __entry->sector = bio ? bio->bi_iter.bi_sector : 0; 568 __entry->sector = bio->bi_iter.bi_sector; 609 __entry->sector = bio->bi_iter.bi_sector;
|
D | bcache.h | 27 __entry->sector = bio->bi_iter.bi_sector; 28 __entry->orig_sector = bio->bi_iter.bi_sector - 16; 102 __entry->sector = bio->bi_iter.bi_sector; 137 __entry->sector = bio->bi_iter.bi_sector; 168 __entry->sector = bio->bi_iter.bi_sector;
|
D | f2fs.h | 757 __entry->sector = bio->bi_iter.bi_sector;
|
/linux-4.1.27/mm/ |
D | page_io.c | 34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); in get_swap_bio() 35 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; in get_swap_bio() 65 (unsigned long long)bio->bi_iter.bi_sector); in end_swap_bio_write() 83 (unsigned long long)bio->bi_iter.bi_sector); in end_swap_bio_read()
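get_swap_bio() addresses the swap device in page-sized slots and then converts to 512-byte sectors by shifting left by PAGE_SHIFT - 9, as the first two hits show. A sketch assuming 4 KiB pages.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12           /* assumption: 4 KiB pages */

typedef uint64_t sector_t;

int main(void)
{
        sector_t swap_page = 25;                 /* slot on the swap device */
        sector_t bi_sector = swap_page << (PAGE_SHIFT - 9);

        printf("page %llu -> sector %llu\n",
               (unsigned long long)swap_page, (unsigned long long)bi_sector);
        return 0;
}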
|
/linux-4.1.27/block/ |
D | blk-lib.c | 111 bio->bi_iter.bi_sector = sector; in blkdev_issue_discard() 185 bio->bi_iter.bi_sector = sector; in blkdev_issue_write_same() 251 bio->bi_iter.bi_sector = sector; in __blkdev_issue_zeroout()
|
D | bio.c | 650 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; in bio_clone_bioset() 738 .bi_sector = bio->bi_iter.bi_sector, in __bio_add_page() 800 .bi_sector = bio->bi_iter.bi_sector, in __bio_add_page() 871 max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); in bio_add_page()
|
D | blk-merge.c | 605 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge() 607 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
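blk_try_merge() decides the merge direction purely from bi_sector: back-merge when the bio starts exactly where the request ends, front-merge when it ends exactly where the request starts. A sketch of that test with stubbed-out types and a hypothetical enum in place of the elevator return codes.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

enum merge { NO_MERGE, BACK_MERGE, FRONT_MERGE };

static enum merge try_merge(sector_t rq_pos, unsigned rq_sectors,
                            sector_t bio_sector, unsigned bio_sectors)
{
        if (rq_pos + rq_sectors == bio_sector)
                return BACK_MERGE;       /* bio starts where the request ends */
        if (rq_pos - bio_sectors == bio_sector)
                return FRONT_MERGE;      /* bio ends where the request starts */
        return NO_MERGE;
}

int main(void)
{
        printf("%d\n", try_merge(100, 8, 108, 8));       /* 1: back merge */
        printf("%d\n", try_merge(100, 8, 92, 8));        /* 2: front merge */
        return 0;
}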
|
D | blk-core.c | 1500 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge() 1578 req->__sector = bio->bi_iter.bi_sector; in init_request_from_bio() 1702 bio->bi_iter.bi_sector += p->start_sect; in blk_partition_remap() 1707 bio->bi_iter.bi_sector - p->start_sect); in blk_partition_remap() 1773 sector_t sector = bio->bi_iter.bi_sector; in bio_check_eod() 1809 (long long) bio->bi_iter.bi_sector); in generic_make_request_checks() 1993 (unsigned long long)bio->bi_iter.bi_sector, in submit_bio()
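Two of the blk-core hits are worth spelling out: blk_partition_remap() turns a partition-relative bi_sector into a whole-disk sector by adding the partition start, and bio_check_eod() rejects a bio that would run past the end of the device. A sketch of both; the device size and sample numbers are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* true when a bio of nr_sectors starting at bi_sector stays on the device */
static bool bio_fits(sector_t bi_sector, unsigned nr_sectors, sector_t maxsector)
{
        return nr_sectors <= maxsector && bi_sector <= maxsector - nr_sectors;
}

int main(void)
{
        sector_t part_start = 2048, bi_sector = 100, disk_sectors = 1 << 21;

        bi_sector += part_start;         /* partition-relative -> whole-disk */
        printf("remapped to %llu, fits: %d\n",
               (unsigned long long)bi_sector,
               bio_fits(bi_sector, 8, disk_sectors));
        return 0;
}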
|
D | blk-flush.c | 483 *error_sector = bio->bi_iter.bi_sector; in blkdev_issue_flush()
|
D | bio-integrity.c | 299 bip_set_seed(bip, bio->bi_iter.bi_sector); in bio_integrity_prep()
|
D | elevator.c | 443 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
|
/linux-4.1.27/drivers/s390/block/ |
D | xpram.c | 193 if ((bio->bi_iter.bi_sector & 7) != 0 || in xpram_make_request() 200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) in xpram_make_request() 202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; in xpram_make_request()
|
D | dcssblk.c | 833 if ((bio->bi_iter.bi_sector & 7) != 0 || in dcssblk_make_request() 857 index = (bio->bi_iter.bi_sector >> 3); in dcssblk_make_request()
|
/linux-4.1.27/drivers/block/ |
D | pktcdvd.c | 654 if (s <= tmp->bio->bi_iter.bi_sector) in pkt_rbtree_find() 663 if (s > tmp->bio->bi_iter.bi_sector) { in pkt_rbtree_find() 668 BUG_ON(s > tmp->bio->bi_iter.bi_sector); in pkt_rbtree_find() 679 sector_t s = node->bio->bi_iter.bi_sector; in pkt_rbtree_insert() 685 if (s < tmp->bio->bi_iter.bi_sector) in pkt_rbtree_insert() 864 if (bio && (bio->bi_iter.bi_sector == in pkt_iosched_process_queue() 987 (unsigned long long)bio->bi_iter.bi_sector, err); in pkt_end_io_read() 1035 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / in pkt_gather_data() 1063 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); in pkt_gather_data() 1160 pkt->bio->bi_iter.bi_sector = new_sector; in pkt_start_recovery() [all …]
|
D | pmem.c | 78 sector = bio->bi_iter.bi_sector; in pmem_make_request()
|
D | umem.c | 389 desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9); in add_bio() 531 (unsigned long long)bio->bi_iter.bi_sector, in mm_make_request()
|
D | brd.c | 336 sector = bio->bi_iter.bi_sector; in brd_make_request()
|
D | ps3disk.c | 106 iter.bio->bi_iter.bi_sector); in ps3disk_scatter_gather()
|
D | ps3vram.c | 556 loff_t offset = bio->bi_iter.bi_sector << 9; in ps3vram_do_bio()
|
D | rbd.c | 2447 bio_list->bi_iter.bi_sector << SECTOR_SHIFT); in rbd_img_request_fill() 3477 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector; in rbd_merge_bvec()
|
D | floppy.c | 3814 bio.bi_iter.bi_sector = 0; in __floppy_read_block_0()
|
/linux-4.1.27/fs/ext4/ |
D | page-io.c | 316 sector_t bi_sector = bio->bi_iter.bi_sector; in ext4_end_bio() local 332 bi_sector >> (inode->i_blkbits - 9)); in ext4_end_bio() 385 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in io_submit_init_bio()
|
D | readpage.c | 294 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); in ext4_mpage_readpages()
|
D | crypto.c | 524 bio->bi_iter.bi_sector = pblk; in ext4_encrypted_zeroout()
|
/linux-4.1.27/include/linux/ |
D | bio.h | 100 #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio))) 237 iter->bi_sector += bytes >> 9; in bio_advance_iter() 346 return bip->bip_iter.bi_sector; in bip_get_seed() 352 bip->bip_iter.bi_sector = seed; in bip_set_seed()
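The bio.h hits define the basic bookkeeping: bio_end_sector() is the start plus the length in sectors, and bio_advance_iter() moves bi_sector forward by the completed bytes divided by 512 while shrinking the residual byte count. A simplified sketch that ignores the biovec index handling; the struct here is a stub, not the kernel's bvec_iter.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

struct bvec_iter_sketch {
        sector_t bi_sector;     /* device address in 512-byte sectors */
        unsigned bi_size;       /* residual I/O count in bytes */
};

static sector_t end_sector(const struct bvec_iter_sketch *it)
{
        return it->bi_sector + (it->bi_size >> 9);
}

static void advance(struct bvec_iter_sketch *it, unsigned bytes)
{
        it->bi_sector += bytes >> 9;
        it->bi_size -= bytes;
}

int main(void)
{
        struct bvec_iter_sketch it = { .bi_sector = 2048, .bi_size = 8192 };

        printf("end = %llu\n", (unsigned long long)end_sector(&it));
        advance(&it, 4096);              /* complete the first 4 KiB */
        printf("now at %llu with %u bytes left\n",
               (unsigned long long)it.bi_sector, it.bi_size);
        return 0;
}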
|
D | blk_types.h | 32 sector_t bi_sector; /* device address in 512 byte member
|
D | blkdev.h | 244 sector_t bi_sector; member
|
/linux-4.1.27/kernel/power/ |
D | block_io.c | 35 bio->bi_iter.bi_sector = sector; in submit()
|
/linux-4.1.27/drivers/target/ |
D | target_core_iblock.c | 357 bio->bi_iter.bi_sector = lba; in iblock_get_bio() 653 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; in iblock_alloc_bip() 656 (unsigned long long)bip->bip_iter.bi_sector); in iblock_alloc_bip()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | lloop.c | 219 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; in do_bio_lustrebacked() 312 (unsigned long long)(*bio)->bi_iter.bi_sector, in loop_get_bio() 347 (unsigned long long)old_bio->bi_iter.bi_sector, in loop_make_request()
|
/linux-4.1.27/fs/btrfs/ |
D | raid56.c | 1066 last_end = (u64)last->bi_iter.bi_sector << 9; in rbio_add_io_page() 1089 bio->bi_iter.bi_sector = disk_start >> 9; in rbio_add_io_page() 1145 start = (u64)bio->bi_iter.bi_sector << 9; in index_rbio_pages() 1332 u64 physical = bio->bi_iter.bi_sector; in find_bio_stripe() 1359 u64 logical = bio->bi_iter.bi_sector; in find_logical_bio_stripe() 1662 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; in plug_cmp() 1663 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; in plug_cmp()
|
D | compression.c | 174 (u64)bio->bi_iter.bi_sector << 9); in end_compressed_bio_read() 514 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { in add_ra_bio_pages() 581 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; in btrfs_submit_compressed_read()
|
D | file-item.c | 220 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; in __btrfs_lookup_bio_sums() 455 sums->bytenr = (u64)bio->bi_iter.bi_sector << 9; in btrfs_csum_one_bio() 478 sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) + in btrfs_csum_one_bio()
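A pattern that recurs through the btrfs hits (raid56.c, compression.c, file-item.c, and the volumes.c/inode.c entries further down): bi_sector counts 512-byte units while btrfs works in byte offsets ("bytenr"), so the code shifts by 9 in both directions. A trivial sketch of the conversion.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static uint64_t sector_to_bytenr(sector_t s) { return (uint64_t)s << 9; }
static sector_t bytenr_to_sector(uint64_t b) { return b >> 9; }

int main(void)
{
        printf("%llu\n", (unsigned long long)sector_to_bytenr(8));        /* 4096 */
        printf("%llu\n", (unsigned long long)bytenr_to_sector(1 << 20));  /* 2048 */
        return 0;
}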
|
D | scrub.c | 1455 bio->bi_iter.bi_sector = page->logical >> 9; in scrub_submit_raid56_bio_wait() 1514 bio->bi_iter.bi_sector = page->physical >> 9; in scrub_recheck_block() 1642 bio->bi_iter.bi_sector = page_bad->physical >> 9; in scrub_repair_page_from_good_copy() 1742 bio->bi_iter.bi_sector = sbio->physical >> 9; in scrub_add_page_to_wr_bio() 2149 bio->bi_iter.bi_sector = sbio->physical >> 9; in scrub_add_page_to_rd_bio() 2704 bio->bi_iter.bi_sector = sparity->logic_start >> 9; in scrub_parity_check_and_repair() 4213 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; in write_page_nocow()
|
D | extent_io.c | 2059 bio->bi_iter.bi_sector = sector; in repair_io_failure() 2367 bio->bi_iter.bi_sector = failrec->logical >> 9; in btrfs_create_repair_bio() 2570 "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err, in end_bio_extent_readpage() 2708 bio->bi_iter.bi_sector = first_sector; in btrfs_bio_alloc() 2812 contig = bio->bi_iter.bi_sector == sector; in submit_extent_page()
|
D | check-integrity.c | 1685 bio->bi_iter.bi_sector = dev_bytenr >> 9; in btrfsic_read_block() 2982 dev_bytenr = 512 * bio->bi_iter.bi_sector; in __btrfsic_submit_bio() 2990 (unsigned long long)bio->bi_iter.bi_sector, in __btrfsic_submit_bio()
|
D | volumes.c | 5728 .bi_sector = sector, in bio_size_ok() 5757 bio->bi_iter.bi_sector = physical >> 9; in submit_stripe_bio() 5766 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, in submit_stripe_bio() 5823 bio->bi_iter.bi_sector = logical >> 9; in bbio_error() 5834 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_map_bio()
|
D | inode.c | 1798 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_merge_bio_hook() 7978 (unsigned long long)bio->bi_iter.bi_sector, in btrfs_end_dio_bio() 8107 u64 start_sector = orig_bio->bi_iter.bi_sector; in btrfs_submit_direct_hook() 8241 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; in btrfs_submit_direct()
|
/linux-4.1.27/arch/m68k/emu/ |
D | nfblock.c | 68 sector_t sec = bio->bi_iter.bi_sector; in nfhd_make_request()
|
/linux-4.1.27/Documentation/block/ |
D | biovecs.txt | 12 update bi_sector and bi_size, and advance bi_idx to the next biovec. If it 17 partially complete a bio is segregated into struct bvec_iter: bi_sector,
|
/linux-4.1.27/fs/hfsplus/ |
D | wrapper.c | 66 bio->bi_iter.bi_sector = sector; in hfsplus_submit_bio()
|
/linux-4.1.27/drivers/block/rsxx/ |
D | dev.c | 190 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size); in rsxx_make_request()
|
D | dma.c | 700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ in rsxx_dma_queue_bio()
|
/linux-4.1.27/arch/powerpc/sysdev/ |
D | axonram.c | 116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << in axon_ram_make_request()
|
/linux-4.1.27/fs/jfs/ |
D | jfs_metapage.c | 434 bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); in metapage_writepage() 513 bio->bi_iter.bi_sector = in metapage_readpage()
|
D | jfs_logmgr.c | 2000 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmRead() 2146 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmStartIO()
|
/linux-4.1.27/drivers/block/zram/ |
D | zram_drv.c | 929 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; in __zram_make_request() 930 offset = (bio->bi_iter.bi_sector & in __zram_make_request() 986 if (!valid_io_request(zram, bio->bi_iter.bi_sector, in zram_make_request()
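__zram_make_request() splits bi_sector into a page index and a byte offset within that page, as the first two hits show; with 4 KiB pages there are 8 sectors per page. A sketch assuming that page size.

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT            9
#define SECTORS_PER_PAGE_SHIFT  3       /* assumption: 4 KiB pages */
#define SECTORS_PER_PAGE        (1 << SECTORS_PER_PAGE_SHIFT)

typedef uint64_t sector_t;

int main(void)
{
        sector_t bi_sector = 21;
        uint64_t index  = bi_sector >> SECTORS_PER_PAGE_SHIFT;
        unsigned offset = (bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        printf("page %llu, offset %u bytes\n",
               (unsigned long long)index, offset);
        return 0;
}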
|
/linux-4.1.27/arch/xtensa/platforms/iss/ |
D | simdisk.c | 108 sector_t sector = bio->bi_iter.bi_sector; in simdisk_xfer_bio()
|
/linux-4.1.27/fs/nfs/blocklayout/ |
D | blocklayout.c | 111 (unsigned long long)bio->bi_iter.bi_sector); in bl_submit_bio() 131 bio->bi_iter.bi_sector = disk_sector; in bl_alloc_init_bio()
|
/linux-4.1.27/kernel/trace/ |
D | blktrace.c | 786 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, in blk_add_trace_bio() 890 __blk_add_trace(bt, bio->bi_iter.bi_sector, in blk_add_trace_split() 924 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, in blk_add_trace_bio_remap()
|
/linux-4.1.27/fs/nilfs2/ |
D | segbuf.c | 419 bio->bi_iter.bi_sector = in nilfs_alloc_seg_bio()
|
/linux-4.1.27/fs/ |
D | mpage.c | 82 bio->bi_iter.bi_sector = first_sector; in mpage_alloc()
|
D | buffer.c | 2979 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod() 2982 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod() 3023 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in _submit_bh()
|
D | direct-io.c | 368 bio->bi_iter.bi_sector = first_sector; in dio_bio_alloc()
|
/linux-4.1.27/fs/gfs2/ |
D | lops.c | 276 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); in gfs2_log_alloc_bio()
|
D | ops_fstype.c | 243 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); in gfs2_read_super()
|
/linux-4.1.27/drivers/block/drbd/ |
D | drbd_actlog.c | 156 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io()
|
D | drbd_bitmap.c | 1025 bio->bi_iter.bi_sector = on_disk_sector; in bm_page_io_async()
|
D | drbd_req.c | 67 req->i.sector = bio_src->bi_iter.bi_sector; in drbd_req_new()
|
D | drbd_receiver.c | 1418 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request() 1443 len, (uint64_t)bio->bi_iter.bi_sector); in drbd_submit_peer_request() 1729 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); in recv_dless_read()
|
/linux-4.1.27/drivers/block/xen-blkback/ |
D | blkback.c | 1337 bio->bi_iter.bi_sector = preq.sector_number; in dispatch_rw_block_io()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_buf.c | 1172 bio->bi_iter.bi_sector = sector; in xfs_buf_ioapply_map()
|
D | xfs_aops.c | 390 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in xfs_alloc_ioend_bio()
|
/linux-4.1.27/drivers/block/aoe/ |
D | aoecmd.c | 344 put_lba(ah, f->iter.bi_sector); in ata_rw_frameinit()
|
/linux-4.1.27/fs/f2fs/ |
D | data.c | 87 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); in __bio_alloc()
|
/linux-4.1.27/fs/ocfs2/cluster/ |
D | heartbeat.c | 416 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); in o2hb_setup_one_bio()
|