bio_sectors 217 block/bio-integrity.c if (!bio_sectors(bio))
bio_sectors 233 block/bio-integrity.c intervals = bio_integrity_intervals(bi, bio_sectors(bio));
bio_sectors 394 block/bio-integrity.c bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
bio_sectors 1896 block/bio.c BUG_ON(sectors >= bio_sectors(bio));
bio_sectors 652 block/blk-core.c if (blk_rq_sectors(req) + bio_sectors(bio) >
bio_sectors 794 block/blk-core.c if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
bio_sectors 823 block/blk-core.c unsigned int nr_sectors = bio_sectors(bio);
bio_sectors 855 block/blk-core.c if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) {
bio_sectors 873 block/blk-core.c int nr_sectors = bio_sectors(bio);
bio_sectors 1160 block/blk-core.c count = bio_sectors(bio);
bio_sectors 1640 block/blk-iocost.c u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
bio_sectors 85 block/blk-merge.c if (bio_sectors(bio) <= max_discard_sectors)
bio_sectors 113 block/blk-merge.c if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
bio_sectors 129 block/blk-merge.c if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
bio_sectors 579 block/blk-merge.c if (blk_rq_sectors(req) + bio_sectors(bio) >
bio_sectors 595 block/blk-merge.c if (blk_rq_sectors(req) + bio_sectors(bio) >
bio_sectors 611 block/blk-merge.c if (blk_rq_sectors(req) + bio_sectors(next->bio) >
bio_sectors 892 block/blk-merge.c else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
bio_sectors 307 block/bounce.c if (!passthrough && sectors < bio_sectors(*bio_orig)) {
bio_sectors 1101 drivers/block/null_blk_main.c bio_sectors(bio) << SECTOR_SHIFT);
bio_sectors 1287 drivers/block/null_blk_main.c sector_t nr_sectors = bio_sectors(bio);
bio_sectors 2343 drivers/block/pktcdvd.c pd->stats.secs_r += bio_sectors(bio);
bio_sectors 94 drivers/block/ps3disk.c __func__, __LINE__, i, bio_sectors(iter.bio),
bio_sectors 101 drivers/block/rsxx/dev.c generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
bio_sectors 150 drivers/ide/ide-cd.c unsigned long bio_sectors;
bio_sectors 191 drivers/ide/ide-cd.c bio_sectors = max(bio_sectors(failed_command->bio), 4U);
bio_sectors 192 drivers/ide/ide-cd.c sector &= ~(bio_sectors - 1);
bio_sectors 32 drivers/lightnvm/pblk-cache.c generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
bio_sectors 275 drivers/lightnvm/pblk-read.c generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
bio_sectors 126 drivers/md/bcache/request.c bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
bio_sectors 128 drivers/md/bcache/request.c while (bio_sectors(bio)) {
bio_sectors 129 drivers/md/bcache/request.c unsigned int sectors = min(bio_sectors(bio),
bio_sectors 207 drivers/md/bcache/request.c if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
bio_sectors 234 drivers/md/bcache/request.c if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
bio_sectors 410 drivers/md/bcache/request.c bio_sectors(bio) & (c->sb.block_size - 1)) {
bio_sectors 466 drivers/md/bcache/request.c bch_rescale_priorities(c, bio_sectors(bio));
bio_sectors 469 drivers/md/bcache/request.c bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
bio_sectors 536 drivers/md/bcache/request.c unsigned int bio_sectors = bio_sectors(bio);
bio_sectors 547 drivers/md/bcache/request.c BUG_ON(bio_sectors <= sectors);
bio_sectors 910 drivers/md/bcache/request.c s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
bio_sectors 1208 drivers/md/bcache/request.c bio_sectors(bio),
bio_sectors 1292 drivers/md/bcache/request.c unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
bio_sectors 1329 drivers/md/bcache/request.c generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
bio_sectors 79 drivers/md/bcache/writeback.h bio_sectors(bio)))
bio_sectors 894 drivers/md/dm-cache-target.c pb->len = bio_sectors(bio);
bio_sectors 312 drivers/md/dm-clone-target.c return (bio_data_dir(bio) == WRITE && bio_sectors(bio) == clone->region_size);
bio_sectors 497 drivers/md/dm-clone-target.c bio_sectors(bio));
bio_sectors 825 drivers/md/dm-crypt.c if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
bio_sectors 832 drivers/md/dm-crypt.c tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
bio_sectors 1629 drivers/md/dm-crypt.c sector += bio_sectors(clone);
bio_sectors 2761 drivers/md/dm-crypt.c if (bio_sectors(bio))
bio_sectors 2788 drivers/md/dm-crypt.c unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
bio_sectors 2793 drivers/md/dm-crypt.c if (bio_sectors(bio) > cc->tag_pool_max_sectors)
bio_sectors 298 drivers/md/dm-delay.c if (bio_sectors(bio))
bio_sectors 283 drivers/md/dm-flakey.c if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio_sectors 1433 drivers/md/dm-integrity.c if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
bio_sectors 1639 drivers/md/dm-integrity.c if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
bio_sectors 1641 drivers/md/dm-integrity.c (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
bio_sectors 1645 drivers/md/dm-integrity.c if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
bio_sectors 1648 drivers/md/dm-integrity.c (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
bio_sectors 1667 drivers/md/dm-integrity.c unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
bio_sectors 1870 drivers/md/dm-integrity.c dio->range.n_sectors = bio_sectors(bio);
bio_sectors 93 drivers/md/dm-linear.c if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio_sectors 692 drivers/md/dm-log-writes.c if (!bio_sectors(bio) && !flush_bio)
bio_sectors 726 drivers/md/dm-log-writes.c block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
bio_sectors 738 drivers/md/dm-log-writes.c if (flush_bio && !bio_sectors(bio)) {
bio_sectors 469 drivers/md/dm-raid1.c io->count = bio_sectors(bio);
bio_sectors 2667 drivers/md/dm-snap.c if (bio_sectors(bio) > available_sectors)
bio_sectors 641 drivers/md/dm-verity-target.c if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
bio_sectors 1174 drivers/md/dm-writecache.c if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
bio_sectors 624 drivers/md/dm-zoned-target.c unsigned int nr_sectors = bio_sectors(bio);
bio_sectors 46 drivers/md/dm-zoned.h #define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
bio_sectors 658 drivers/md/dm.c generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
bio_sectors 663 drivers/md/dm.c bio->bi_iter.bi_sector, bio_sectors(bio),
bio_sectors 678 drivers/md/dm.c bio->bi_iter.bi_sector, bio_sectors(bio),
bio_sectors 1636 drivers/md/dm.c ci.sector_count = bio_sectors(bio);
bio_sectors 1648 drivers/md/dm.c struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
bio_sectors 1708 drivers/md/dm.c ci.sector_count = bio_sectors(bio);
bio_sectors 1725 drivers/md/dm.c sector_count = bio_sectors(*bio);
bio_sectors 391 drivers/md/md.c if (bio_sectors(bio) != 0)
bio_sectors 401 drivers/md/md.c sectors = bio_sectors(bio);
bio_sectors 469 drivers/md/raid0.c + bio_sectors(bio));
bio_sectors 473 drivers/md/raid0.c + bio_sectors(bio));
bio_sectors 599 drivers/md/raid0.c if (sectors < bio_sectors(bio)) {
bio_sectors 1193 drivers/md/raid1.c r1_bio->sectors = bio_sectors(bio);
bio_sectors 1293 drivers/md/raid1.c if (max_sectors < bio_sectors(bio)) {
bio_sectors 1464 drivers/md/raid1.c if (max_sectors < bio_sectors(bio)) {
bio_sectors 1582 drivers/md/raid1.c bio->bi_iter.bi_sector, bio_sectors(bio));
bio_sectors 2228 drivers/md/raid1.c md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
bio_sectors 1192 drivers/md/raid10.c if (max_sectors < bio_sectors(bio)) {
bio_sectors 1477 drivers/md/raid10.c if (r10_bio->sectors < bio_sectors(bio)) {
bio_sectors 1526 drivers/md/raid10.c int sectors = bio_sectors(bio);
bio_sectors 2096 drivers/md/raid10.c md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
bio_sectors 2120 drivers/md/raid10.c bio_sectors(tbio));
bio_sectors 2251 drivers/md/raid10.c md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
bio_sectors 2257 drivers/md/raid10.c bio_sectors(wbio2));
bio_sectors 5122 drivers/md/raid5.c unsigned int bio_sectors = bio_sectors(bio);
bio_sectors 5128 drivers/md/raid5.c ((sector & (chunk_sectors - 1)) + bio_sectors);
bio_sectors 5264 drivers/md/raid5.c bio_sectors(align_bi),
bio_sectors 5301 drivers/md/raid5.c if (sectors < bio_sectors(raid_bio)) {
bio_sectors 408 drivers/nvdimm/nd.h generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
bio_sectors 353 drivers/scsi/sr.c bio_sectors(SCpnt->request->bio);
bio_sectors 648 drivers/target/target_core_iblock.c bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
bio_sectors 298 fs/ext4/page-io.c (unsigned) bio_sectors(bio),
bio_sectors 375 include/linux/bio.h if (sectors >= bio_sectors(bio))
bio_sectors 723 include/linux/blk-cgroup.h bio_issue_init(&bio->bi_issue, bio_sectors(bio));
bio_sectors 242 include/trace/events/block.h __entry->nr_sector = bio_sectors(bio);
bio_sectors 279 include/trace/events/block.h __entry->nr_sector = bio_sectors(bio);
bio_sectors 307 include/trace/events/block.h __entry->nr_sector = bio_sectors(bio);
bio_sectors 374 include/trace/events/block.h __entry->nr_sector = bio_sectors(bio);
bio_sectors 402 include/trace/events/block.h __entry->nr_sector = bio ? bio_sectors(bio) : 0;
bio_sectors 578 include/trace/events/block.h __entry->nr_sector = bio_sectors(bio);
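
The call sites above all treat bio_sectors(bio) as the remaining payload of a struct bio expressed in 512-byte sectors; it is declared in include/linux/bio.h and, as far as I can tell, effectively evaluates to bio->bi_iter.bi_size >> 9 in this kernel tree. The user-space sketch below is a hypothetical model of that computation and of the queue-limit comparison pattern visible at block/blk-merge.c line 85; the *_model types, the helper function, and the limit value are illustrative stand-ins, not the real kernel definitions.

    /*
     * Minimal user-space sketch (not kernel code). It models how the call
     * sites in the listing use bio_sectors(): the remaining I/O size in
     * bytes is converted to 512-byte sectors and compared against a limit.
     */
    #include <stdio.h>

    #define SECTOR_SHIFT 9              /* 512-byte sectors, as in the kernel */

    struct bvec_iter_model {            /* stand-in for struct bvec_iter */
            unsigned int bi_size;       /* remaining I/O size in bytes */
    };

    struct bio_model {                  /* stand-in for struct bio */
            struct bvec_iter_model bi_iter;
    };

    /* models bio_sectors(bio): bytes remaining -> 512-byte sectors */
    static unsigned int bio_sectors_model(const struct bio_model *bio)
    {
            return bio->bi_iter.bi_size >> SECTOR_SHIFT;
    }

    int main(void)
    {
            struct bio_model bio = { .bi_iter = { .bi_size = 64 * 1024 } };
            unsigned int max_discard_sectors = 64;  /* hypothetical queue limit */

            /* comparison pattern as in block/blk-merge.c: only bios larger
             * than the queue limit need to be split */
            if (bio_sectors_model(&bio) <= max_discard_sectors)
                    printf("no split needed: %u sectors\n",
                           bio_sectors_model(&bio));
            else
                    printf("split required: %u sectors > limit %u\n",
                           bio_sectors_model(&bio), max_discard_sectors);
            return 0;
    }

Run as shown, the 64 KiB bio models out to 128 sectors, which exceeds the hypothetical 64-sector limit, mirroring the split decision made by the merge code in the listing.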