
Searched refs:bi_sector (Results 1 – 109 of 109) sorted by relevance

/linux-4.4.14/drivers/md/
Dlinear.c230 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); in linear_make_request()
236 if (unlikely(bio->bi_iter.bi_sector >= end_sector || in linear_make_request()
237 bio->bi_iter.bi_sector < start_sector)) in linear_make_request()
245 bio->bi_iter.bi_sector, in linear_make_request()
252 split->bi_iter.bi_sector = split->bi_iter.bi_sector - in linear_make_request()
269 (unsigned long long)bio->bi_iter.bi_sector, in linear_make_request()
Ddm-linear.c78 static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector) in linear_map_sector() argument
82 return lc->start + dm_target_offset(ti, bi_sector); in linear_map_sector()
91 bio->bi_iter.bi_sector = in linear_map_bio()
92 linear_map_sector(ti, bio->bi_iter.bi_sector); in linear_map_bio()
Dfaulty.c78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector; in faulty_fail()
188 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request()
192 add_sector(conf, bio->bi_iter.bi_sector, in make_request()
200 if (check_sector(conf, bio->bi_iter.bi_sector, in make_request()
206 add_sector(conf, bio->bi_iter.bi_sector, in make_request()
211 add_sector(conf, bio->bi_iter.bi_sector, in make_request()
Ddm-flakey.c240 static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) in flakey_map_sector() argument
244 return fc->start + dm_target_offset(ti, bi_sector); in flakey_map_sector()
253 bio->bi_iter.bi_sector = in flakey_map_bio()
254 flakey_map_sector(ti, bio->bi_iter.bi_sector); in flakey_map_bio()
272 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); in corrupt_bio_data()
Dmultipath.c101 (unsigned long long)bio->bi_iter.bi_sector); in multipath_end_request()
135 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; in multipath_make_request()
341 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; in multipathd()
347 (unsigned long long)bio->bi_iter.bi_sector); in multipathd()
353 (unsigned long long)bio->bi_iter.bi_sector); in multipathd()
355 bio->bi_iter.bi_sector += in multipathd()
Draid1.c70 sector_t bi_sector);
243 sector_t bi_sector = bio->bi_iter.bi_sector; in call_bio_endio() local
268 allow_barrier(conf, start_next_window, bi_sector); in call_bio_endio()
280 (unsigned long long) bio->bi_iter.bi_sector, in raid_end_bio_io()
479 (unsigned long long) mbio->bi_iter.bi_sector, in raid1_end_write_request()
849 <= bio->bi_iter.bi_sector)) in need_to_wait_for_sync()
886 if (bio->bi_iter.bi_sector >= conf->next_resync) { in wait_barrier()
893 <= bio->bi_iter.bi_sector) in wait_barrier()
907 sector_t bi_sector) in allow_barrier() argument
916 <= bi_sector) in allow_barrier()
[all …]
Ddm-stripe.c266 stripe_map_range_sector(sc, bio->bi_iter.bi_sector, in stripe_map_range()
272 bio->bi_iter.bi_sector = begin + in stripe_map_range()
302 stripe_map_sector(sc, bio->bi_iter.bi_sector, in stripe_map()
303 &stripe, &bio->bi_iter.bi_sector); in stripe_map()
305 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; in stripe_map()
Ddm-delay.c287 bio->bi_iter.bi_sector = dc->start_write + in delay_map()
288 dm_target_offset(ti, bio->bi_iter.bi_sector); in delay_map()
294 bio->bi_iter.bi_sector = dc->start_read + in delay_map()
295 dm_target_offset(ti, bio->bi_iter.bi_sector); in delay_map()
Draid10.c1084 bio->bi_iter.bi_sector < conf->reshape_progress && in __make_request()
1085 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { in __make_request()
1091 conf->reshape_progress <= bio->bi_iter.bi_sector || in __make_request()
1092 conf->reshape_progress >= bio->bi_iter.bi_sector + in __make_request()
1099 ? (bio->bi_iter.bi_sector < conf->reshape_safe && in __make_request()
1100 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) in __make_request()
1101 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && in __make_request()
1102 bio->bi_iter.bi_sector < conf->reshape_progress))) { in __make_request()
1120 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1149 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, in __make_request()
[all …]
Ddm-stats.h33 sector_t bi_sector, unsigned bi_sectors, bool end,
Draid0.c446 ((bio->bi_iter.bi_sector & (chunk_sects-1)) in is_io_in_chunk_boundary()
449 sector_t sector = bio->bi_iter.bi_sector; in is_io_in_chunk_boundary()
467 sector_t sector = bio->bi_iter.bi_sector; in raid0_make_request()
476 sector = bio->bi_iter.bi_sector; in raid0_make_request()
488 split->bi_iter.bi_sector = sector + zone->dev_start + in raid0_make_request()
Ddm-stats.c588 sector_t bi_sector, sector_t end_sector, in __dm_stat_bio() argument
595 if (end_sector <= s->start || bi_sector >= s->end) in __dm_stat_bio()
597 if (unlikely(bi_sector < s->start)) { in __dm_stat_bio()
601 rel_sector = bi_sector - s->start; in __dm_stat_bio()
602 todo = end_sector - bi_sector; in __dm_stat_bio()
626 sector_t bi_sector, unsigned bi_sectors, bool end, in dm_stats_account_io() argument
638 end_sector = bi_sector + bi_sectors; in dm_stats_account_io()
647 (bi_sector == (ACCESS_ONCE(last->last_sector) && in dm_stats_account_io()
666 __dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux); in dm_stats_account_io()
Ddm-verity.c159 static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector) in verity_map_sector() argument
161 return v->data_start + dm_target_offset(v->ti, bi_sector); in verity_map_sector()
549 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); in verity_map()
551 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & in verity_map()
570 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); in verity_map()
Ddm-thin.c365 bio->bi_iter.bi_sector = sector; in __blkdev_issue_discard_async()
638 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
655 sector_t b = bio->bi_iter.bi_sector; in get_bio_block_range()
679 sector_t bi_sector = bio->bi_iter.bi_sector; in remap() local
683 bio->bi_iter.bi_sector = in remap()
685 (bi_sector & (pool->sectors_per_block - 1)); in remap()
687 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
688 sector_div(bi_sector, pool->sectors_per_block); in remap()
1796 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1798 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
[all …]
Ddm-log-writes.c204 bio->bi_iter.bi_sector = sector; in write_metadata()
268 bio->bi_iter.bi_sector = sector; in log_one_block()
289 bio->bi_iter.bi_sector = sector; in log_one_block()
603 block->sector = bio->bi_iter.bi_sector; in log_writes_map()
Draid5.c141 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) in r5_next_bio()
1013 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1016 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1065 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1068 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1119 if (bio->bi_iter.bi_sector >= sector) in async_copy_data()
1120 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; in async_copy_data()
1122 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; in async_copy_data()
1194 while (rbi && rbi->bi_iter.bi_sector < in ops_complete_biofill()
1229 while (rbi && rbi->bi_iter.bi_sector < in ops_run_biofill()
[all …]
Ddm-raid1.c449 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; in mirror_available()
461 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); in map_sector()
467 bio->bi_iter.bi_sector = map_sector(m, bio); in map_bio()
578 m = choose_mirror(ms, bio->bi_iter.bi_sector); in do_reads()
1228 m = choose_mirror(ms, bio->bi_iter.bi_sector); in mirror_map()
Ddm-switch.c322 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); in switch_map()
326 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; in switch_map()
Ddm-snap.c1671 bio->bi_iter.bi_sector = in remap_exception()
1674 (bio->bi_iter.bi_sector & s->store->chunk_mask); in remap_exception()
1692 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_map()
1814 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_merge_map()
2192 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); in do_origin()
2299 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1)); in origin_map()
Ddm-crypt.c1157 clone->bi_iter.bi_sector = cc->start + io->sector; in kcryptd_io_read()
1263 clone->bi_iter.bi_sector = cc->start + io->sector; in kcryptd_crypt_write_io_submit()
1918 bio->bi_iter.bi_sector = cc->start + in crypt_map()
1919 dm_target_offset(ti, bio->bi_iter.bi_sector); in crypt_map()
1924 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); in crypt_map()
Ddm-cache-target.c772 sector_t bi_sector = bio->bi_iter.bi_sector; in remap_to_cache() local
777 bio->bi_iter.bi_sector = in remap_to_cache()
779 sector_div(bi_sector, cache->sectors_per_block); in remap_to_cache()
781 bio->bi_iter.bi_sector = in remap_to_cache()
783 (bi_sector & (cache->sectors_per_block - 1)); in remap_to_cache()
823 sector_t block_nr = bio->bi_iter.bi_sector; in get_bio_block()
1337 sector_t sb = bio->bi_iter.bi_sector; in calc_discard_block_range()
Ddm.c689 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in start_io_acct()
704 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, in end_io_acct()
1486 sector = clone->bi_iter.bi_sector; in __map_bio()
1517 bio->bi_iter.bi_sector = sector; in bio_setup_sector()
1534 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); in clone_bio()
1733 ci.sector = bio->bi_iter.bi_sector; in __split_and_process_bio()
Ddm-io.c322 bio->bi_iter.bi_sector = where->sector + (where->count - remaining); in do_region()
Ddm-region-hash.c129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - in dm_rh_bio_to_region()
Ddm-cache-policy-mq.c76 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) in iot_update_stats()
Draid5-cache.c262 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; in r5l_bio_alloc()
Ddm-era-target.c1185 sector_t block_nr = bio->bi_iter.bi_sector; in get_block()
Ddm-bufio.c609 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; in use_inline_bio()
Dmd.c747 bio->bi_iter.bi_sector = sector; in md_super_write()
771 bio->bi_iter.bi_sector = sector + rdev->sb_start; in sync_page_io()
775 bio->bi_iter.bi_sector = sector + rdev->new_data_offset; in sync_page_io()
777 bio->bi_iter.bi_sector = sector + rdev->data_offset; in sync_page_io()
/linux-4.4.14/drivers/md/bcache/
Drequest.c124 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate()
133 bio->bi_iter.bi_sector += sectors; in bch_data_invalidate()
137 &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); in bch_data_invalidate()
229 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); in bch_data_insert_start()
389 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || in check_should_bypass()
413 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) in check_should_bypass()
414 if (i->last == bio->bi_iter.bi_sector && in check_should_bypass()
515 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) in cache_lookup_fn()
519 KEY_START(k) > bio->bi_iter.bi_sector) { in cache_lookup_fn()
523 KEY_START(k) - bio->bi_iter.bi_sector) in cache_lookup_fn()
[all …]
Ddebug.c53 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in bch_btree_verify()
132 (uint64_t) bio->bi_iter.bi_sector); in bch_data_verify()
Dwriteback.h53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, in should_writeback()
Dio.c39 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in __bch_submit_bbio()
Dwriteback.c187 io->bio.bi_iter.bi_sector = KEY_START(&w->key); in write_dirty()
256 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); in read_dirty()
Djournal.c55 bio->bi_iter.bi_sector = bucket + offset; in journal_read_bucket()
452 bio->bi_iter.bi_sector = bucket_to_sector(ca->set, in do_journal_discard()
627 bio->bi_iter.bi_sector = PTR_OFFSET(k, i); in journal_write_unlocked()
Dmovinggc.c103 io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); in write_moving()
Dsuper.c214 bio->bi_iter.bi_sector = SB_SECTOR; in __write_super()
508 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
/linux-4.4.14/fs/logfs/
Ddev_bdev.c30 bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9); in sync_request()
95 bio->bi_iter.bi_sector = ofs >> 9; in __bdev_writeseg()
122 bio->bi_iter.bi_sector = ofs >> 9; in __bdev_writeseg()
185 bio->bi_iter.bi_sector = ofs >> 9; in do_erase()
206 bio->bi_iter.bi_sector = ofs >> 9; in do_erase()
/linux-4.4.14/mm/
Dpage_io.c34 bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); in get_swap_bio()
35 bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; in get_swap_bio()
62 (unsigned long long)bio->bi_iter.bi_sector); in end_swap_bio_write()
79 (unsigned long long)bio->bi_iter.bi_sector); in end_swap_bio_read()
/linux-4.4.14/include/trace/events/
Dblock.h273 __entry->sector = bio->bi_iter.bi_sector;
310 __entry->sector = bio->bi_iter.bi_sector;
338 __entry->sector = bio->bi_iter.bi_sector;
405 __entry->sector = bio->bi_iter.bi_sector;
433 __entry->sector = bio ? bio->bi_iter.bi_sector : 0;
568 __entry->sector = bio->bi_iter.bi_sector;
609 __entry->sector = bio->bi_iter.bi_sector;
Dbcache.h27 __entry->sector = bio->bi_iter.bi_sector;
28 __entry->orig_sector = bio->bi_iter.bi_sector - 16;
102 __entry->sector = bio->bi_iter.bi_sector;
137 __entry->sector = bio->bi_iter.bi_sector;
168 __entry->sector = bio->bi_iter.bi_sector;
Df2fs.h790 __entry->sector = bio->bi_iter.bi_sector;
/linux-4.4.14/block/
Dblk-lib.c101 bio->bi_iter.bi_sector = sector; in blkdev_issue_discard()
172 bio->bi_iter.bi_sector = sector; in blkdev_issue_write_same()
237 bio->bi_iter.bi_sector = sector; in __blkdev_issue_zeroout()
Dblk-merge.c46 tmp = bio->bi_iter.bi_sector + split_sectors - alignment; in blk_bio_discard_split()
74 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); in get_max_io_size()
780 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge()
782 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
Dblk-core.c1581 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge()
1695 req->__sector = bio->bi_iter.bi_sector; in init_request_from_bio()
1827 bio->bi_iter.bi_sector += p->start_sect; in blk_partition_remap()
1832 bio->bi_iter.bi_sector - p->start_sect); in blk_partition_remap()
1896 sector_t sector = bio->bi_iter.bi_sector; in bio_check_eod()
1932 (long long) bio->bi_iter.bi_sector); in generic_make_request_checks()
2122 (unsigned long long)bio->bi_iter.bi_sector, in submit_bio()
Dblk-flush.c496 *error_sector = bio->bi_iter.bi_sector; in blkdev_issue_flush()
Dbio-integrity.c309 bip_set_seed(bip, bio->bi_iter.bi_sector); in bio_integrity_prep()
Dbio.c667 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; in bio_clone_bioset()
Delevator.c443 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
/linux-4.4.14/drivers/s390/block/
Dxpram.c195 if ((bio->bi_iter.bi_sector & 7) != 0 || in xpram_make_request()
202 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset) in xpram_make_request()
204 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset; in xpram_make_request()
Ddcssblk.c836 if ((bio->bi_iter.bi_sector & 7) != 0 || in dcssblk_make_request()
860 index = (bio->bi_iter.bi_sector >> 3); in dcssblk_make_request()
/linux-4.4.14/fs/ext4/
Dpage-io.c315 sector_t bi_sector = bio->bi_iter.bi_sector; in ext4_end_bio() local
329 bi_sector >> (inode->i_blkbits - 9)); in ext4_end_bio()
383 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in io_submit_init_bio()
Dreadpage.c294 bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); in ext4_mpage_readpages()
Dcrypto.c427 bio->bi_iter.bi_sector = in ext4_encrypted_zeroout()
/linux-4.4.14/drivers/block/
Dpktcdvd.c655 if (s <= tmp->bio->bi_iter.bi_sector) in pkt_rbtree_find()
664 if (s > tmp->bio->bi_iter.bi_sector) { in pkt_rbtree_find()
669 BUG_ON(s > tmp->bio->bi_iter.bi_sector); in pkt_rbtree_find()
680 sector_t s = node->bio->bi_iter.bi_sector; in pkt_rbtree_insert()
686 if (s < tmp->bio->bi_iter.bi_sector) in pkt_rbtree_insert()
865 if (bio && (bio->bi_iter.bi_sector == in pkt_iosched_process_queue()
988 (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error); in pkt_end_io_read()
1036 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) / in pkt_gather_data()
1064 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1161 pkt->bio->bi_iter.bi_sector = new_sector; in pkt_start_recovery()
[all …]
Dumem.c389 desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9); in add_bio()
531 (unsigned long long)bio->bi_iter.bi_sector, in mm_make_request()
Dbrd.c335 sector = bio->bi_iter.bi_sector; in brd_make_request()
Dps3disk.c106 iter.bio->bi_iter.bi_sector); in ps3disk_scatter_gather()
Dnull_blk.c457 rq->__sector = bio->bi_iter.bi_sector; in null_lnvm_submit_io()
Dps3vram.c556 loff_t offset = bio->bi_iter.bi_sector << 9; in ps3vram_do_bio()
Dfloppy.c3815 bio.bi_iter.bi_sector = 0; in __floppy_read_block_0()
Drbd.c2471 bio_list->bi_iter.bi_sector << SECTOR_SHIFT); in rbd_img_request_fill()
/linux-4.4.14/include/linux/
Dbio.h100 #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
226 iter->bi_sector += bytes >> 9; in bio_advance_iter()
396 return bip->bip_iter.bi_sector; in bip_get_seed()
402 bip->bip_iter.bi_sector = seed; in bip_set_seed()
Dblk_types.h32 sector_t bi_sector; /* device address in 512 byte member
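
The include/linux hits above give the core definitions behind every other result: bi_sector is a member of struct bvec_iter (blk_types.h) and is always counted in 512-byte units regardless of the device's logical block size, which is why drivers throughout this listing convert to and from byte offsets with << 9 and >> 9. A minimal userspace sketch of those relationships, using simplified stand-in structures rather than the real kernel headers:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;            /* stand-in for the kernel's sector_t */

/* Trimmed-down copy of the struct bvec_iter fields seen in the blk_types.h hit above. */
struct bvec_iter {
	sector_t     bi_sector;       /* device address in 512-byte sectors */
	unsigned int bi_size;         /* residual I/O count in bytes */
};

struct bio {
	struct bvec_iter bi_iter;
};

#define SECTOR_SHIFT 9
#define bio_sectors(bio)     ((bio)->bi_iter.bi_size >> SECTOR_SHIFT)
/* Mirrors the bio_end_sector() definition from the bio.h hit above. */
#define bio_end_sector(bio)  ((bio)->bi_iter.bi_sector + bio_sectors(bio))

int main(void)
{
	struct bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 8192 } };

	printf("start sector: %llu\n", (unsigned long long)bio.bi_iter.bi_sector);
	printf("byte offset : %llu\n", (unsigned long long)bio.bi_iter.bi_sector << SECTOR_SHIFT);
	printf("end sector  : %llu\n", (unsigned long long)bio_end_sector(&bio));
	return 0;
}
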
/linux-4.4.14/drivers/target/
Dtarget_core_iblock.c336 bio->bi_iter.bi_sector = lba; in iblock_get_bio()
613 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector; in iblock_alloc_bip()
616 (unsigned long long)bip->bip_iter.bi_sector); in iblock_alloc_bip()
/linux-4.4.14/drivers/nvdimm/
Dblk.c196 bvec.bv_offset, rw, iter.bi_sector); in nd_blk_make_request()
201 (unsigned long long) iter.bi_sector, len); in nd_blk_make_request()
Dpmem.c79 bio_data_dir(bio), iter.bi_sector); in pmem_make_request()
Dbtt.c1186 rw, iter.bi_sector); in btt_make_request()
1191 (unsigned long long) iter.bi_sector, len); in btt_make_request()
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
Dlloop.c219 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; in do_bio_lustrebacked()
312 (unsigned long long)(*bio)->bi_iter.bi_sector, in loop_get_bio()
348 (unsigned long long)old_bio->bi_iter.bi_sector, in loop_make_request()
/linux-4.4.14/drivers/lightnvm/
Drrpc.h150 return bio->bi_iter.bi_sector / NR_PHY_IN_LOG; in rrpc_get_laddr()
Drrpc.c95 sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG; in rrpc_discard()
317 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); in rrpc_move_valid_pages()
335 bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr); in rrpc_move_valid_pages()
/linux-4.4.14/fs/btrfs/
Draid56.c1078 last_end = (u64)last->bi_iter.bi_sector << 9; in rbio_add_io_page()
1101 bio->bi_iter.bi_sector = disk_start >> 9; in rbio_add_io_page()
1156 start = (u64)bio->bi_iter.bi_sector << 9; in index_rbio_pages()
1342 u64 physical = bio->bi_iter.bi_sector; in find_bio_stripe()
1369 u64 logical = bio->bi_iter.bi_sector; in find_logical_bio_stripe()
1670 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector; in plug_cmp()
1671 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector; in plug_cmp()
Dcompression.c171 (u64)bio->bi_iter.bi_sector << 9); in end_compressed_bio_read()
510 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { in add_ra_bio_pages()
577 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9; in btrfs_submit_compressed_read()
Dfile-item.c220 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9; in __btrfs_lookup_bio_sums()
455 sums->bytenr = (u64)bio->bi_iter.bi_sector << 9; in btrfs_csum_one_bio()
478 sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) + in btrfs_csum_one_bio()
Dscrub.c1450 bio->bi_iter.bi_sector = page->logical >> 9; in scrub_submit_raid56_bio_wait()
1506 bio->bi_iter.bi_sector = page->physical >> 9; in scrub_recheck_block()
1587 bio->bi_iter.bi_sector = page_bad->physical >> 9; in scrub_repair_page_from_good_copy()
1687 bio->bi_iter.bi_sector = sbio->physical >> 9; in scrub_add_page_to_wr_bio()
2090 bio->bi_iter.bi_sector = sbio->physical >> 9; in scrub_add_page_to_rd_bio()
2208 bio->bi_iter.bi_sector = logical >> 9; in scrub_missing_raid56_pages()
2785 bio->bi_iter.bi_sector = sparity->logic_start >> 9; in scrub_parity_check_and_repair()
4391 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; in write_page_nocow()
Dcheck-integrity.c1685 bio->bi_iter.bi_sector = dev_bytenr >> 9; in btrfsic_read_block()
2982 dev_bytenr = 512 * bio->bi_iter.bi_sector; in __btrfsic_submit_bio()
2990 (unsigned long long)bio->bi_iter.bi_sector, in __btrfsic_submit_bio()
Dextent_io.c2127 bio->bi_iter.bi_sector = sector; in repair_io_failure()
2435 bio->bi_iter.bi_sector = failrec->logical >> 9; in btrfs_create_repair_bio()
2635 "mirror=%u\n", (u64)bio->bi_iter.bi_sector, in end_bio_extent_readpage()
2769 bio->bi_iter.bi_sector = first_sector; in btrfs_bio_alloc()
2876 contig = bio->bi_iter.bi_sector == sector; in submit_extent_page()
Dvolumes.c5992 bio->bi_iter.bi_sector = physical >> 9; in submit_stripe_bio()
6001 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev, in submit_stripe_bio()
6024 bio->bi_iter.bi_sector = logical >> 9; in bbio_error()
6035 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_map_bio()
Dinode.c1809 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_merge_bio_hook()
8069 (unsigned long long)bio->bi_iter.bi_sector, in btrfs_end_dio_bio()
8201 u64 start_sector = orig_bio->bi_iter.bi_sector; in btrfs_submit_direct_hook()
8334 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; in btrfs_submit_direct()
/linux-4.4.14/arch/m68k/emu/
Dnfblock.c68 sector_t sec = bio->bi_iter.bi_sector; in nfhd_make_request()
/linux-4.4.14/Documentation/block/
Dbiovecs.txt12 update bi_sector and bi_size, and advance bi_idx to the next biovec. If it
17 partially complete a bio is segregated into struct bvec_iter: bi_sector,
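
The biovecs.txt hits above describe the design behind these fields: everything needed to advance or partially complete a bio, including bi_sector, lives in struct bvec_iter, so completing part of the I/O only means moving the iterator forward. A rough userspace illustration of that advance step, modelled loosely on the bio_advance_iter() hit in bio.h earlier in this listing (a simplified sketch, not the kernel code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;            /* stand-in for the kernel's sector_t */

/* The per-bio iteration state biovecs.txt refers to. */
struct bvec_iter {
	sector_t     bi_sector;       /* current device address, 512-byte sectors */
	unsigned int bi_size;         /* bytes still to transfer */
	unsigned int bi_idx;          /* index of the current biovec (unused here) */
};

/*
 * Completing `bytes` of I/O just moves the iterator: the sector advances by
 * bytes >> 9 and the remaining size shrinks, as in the bio.h hit above.
 */
static void advance_iter(struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;
	iter->bi_size   -= bytes;
}

int main(void)
{
	struct bvec_iter it = { .bi_sector = 1000, .bi_size = 4096, .bi_idx = 0 };

	advance_iter(&it, 1024);      /* finish the first 1 KiB, i.e. two sectors */
	printf("bi_sector=%llu bi_size=%u\n",
	       (unsigned long long)it.bi_sector, it.bi_size);    /* 1002 3072 */
	return 0;
}
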
/linux-4.4.14/fs/hfsplus/
Dwrapper.c66 bio->bi_iter.bi_sector = sector; in hfsplus_submit_bio()
/linux-4.4.14/drivers/block/rsxx/
Ddev.c195 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size); in rsxx_make_request()
Ddma.c700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */ in rsxx_dma_queue_bio()
/linux-4.4.14/arch/powerpc/sysdev/
Daxonram.c116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector << in axon_ram_make_request()
/linux-4.4.14/fs/jfs/
Djfs_metapage.c434 bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); in metapage_writepage()
513 bio->bi_iter.bi_sector = in metapage_readpage()
Djfs_logmgr.c2000 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmRead()
2143 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); in lbmStartIO()
/linux-4.4.14/kernel/power/
Dswap.c239 (unsigned long long)bio->bi_iter.bi_sector); in hib_end_io()
261 bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); in hib_submit_io()
266 (unsigned long long)bio->bi_iter.bi_sector); in hib_submit_io()
/linux-4.4.14/arch/xtensa/platforms/iss/
Dsimdisk.c109 sector_t sector = bio->bi_iter.bi_sector; in simdisk_make_request()
/linux-4.4.14/fs/nfs/blocklayout/
Dblocklayout.c111 (unsigned long long)bio->bi_iter.bi_sector); in bl_submit_bio()
131 bio->bi_iter.bi_sector = disk_sector; in bl_alloc_init_bio()
/linux-4.4.14/drivers/block/zram/
Dzram_drv.c848 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; in __zram_make_request()
849 offset = (bio->bi_iter.bi_sector & in __zram_make_request()
906 if (!valid_io_request(zram, bio->bi_iter.bi_sector, in zram_make_request()
/linux-4.4.14/kernel/trace/
Dblktrace.c778 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, in blk_add_trace_bio()
882 __blk_add_trace(bt, bio->bi_iter.bi_sector, in blk_add_trace_split()
915 __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, in blk_add_trace_bio_remap()
/linux-4.4.14/fs/nilfs2/
Dsegbuf.c406 bio->bi_iter.bi_sector = in nilfs_alloc_seg_bio()
/linux-4.4.14/fs/
Dmpage.c82 bio->bi_iter.bi_sector = first_sector; in mpage_alloc()
Dbuffer.c2978 if (unlikely(bio->bi_iter.bi_sector >= maxsector)) in guard_bio_eod()
2981 maxsector -= bio->bi_iter.bi_sector; in guard_bio_eod()
3027 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in submit_bh_wbc()
Ddirect-io.c371 bio->bi_iter.bi_sector = first_sector; in dio_bio_alloc()
/linux-4.4.14/drivers/nvme/host/
Dlightnvm.c450 rqd->bio->bi_iter.bi_sector)); in nvme_nvm_rqtocmd()
/linux-4.4.14/fs/f2fs/
Ddata.c96 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); in __bio_alloc()
983 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr); in f2fs_mpage_readpages()
/linux-4.4.14/fs/gfs2/
Dlops.c269 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); in gfs2_log_alloc_bio()
Dops_fstype.c243 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); in gfs2_read_super()
/linux-4.4.14/drivers/block/drbd/
Ddrbd_actlog.c156 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io()
Ddrbd_bitmap.c1016 bio->bi_iter.bi_sector = on_disk_sector; in bm_page_io_async()
Ddrbd_req.c67 req->i.sector = bio_src->bi_iter.bi_sector; in drbd_req_new()
Ddrbd_receiver.c1419 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request()
1444 len, (uint64_t)bio->bi_iter.bi_sector); in drbd_submit_peer_request()
1730 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); in recv_dless_read()
/linux-4.4.14/drivers/block/xen-blkback/
Dblkback.c1350 bio->bi_iter.bi_sector = preq.sector_number; in dispatch_rw_block_io()
/linux-4.4.14/fs/xfs/
Dxfs_buf.c1170 bio->bi_iter.bi_sector = sector; in xfs_buf_ioapply_map()
Dxfs_aops.c395 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); in xfs_alloc_ioend_bio()
/linux-4.4.14/drivers/block/aoe/
Daoecmd.c344 put_lba(ah, f->iter.bi_sector); in ata_rw_frameinit()
/linux-4.4.14/fs/ocfs2/cluster/
Dheartbeat.c416 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); in o2hb_setup_one_bio()