sectors_per_block  381 drivers/md/dm-cache-target.c 	sector_t sectors_per_block;
sectors_per_block  746 drivers/md/dm-cache-target.c 		oblocks = block_div(oblocks, cache->sectors_per_block);
sectors_per_block  820 drivers/md/dm-cache-target.c 			(block * cache->sectors_per_block) +
sectors_per_block  821 drivers/md/dm-cache-target.c 			sector_div(bi_sector, cache->sectors_per_block);
sectors_per_block  825 drivers/md/dm-cache-target.c 			(bi_sector & (cache->sectors_per_block - 1));
sectors_per_block  876 drivers/md/dm-cache-target.c 		(void) sector_div(block_nr, cache->sectors_per_block);
sectors_per_block 1154 drivers/md/dm-cache-target.c 		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
sectors_per_block 1192 drivers/md/dm-cache-target.c 	o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
sectors_per_block 1193 drivers/md/dm-cache-target.c 	o_region.count = cache->sectors_per_block;
sectors_per_block 1196 drivers/md/dm-cache-target.c 	c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
sectors_per_block 1197 drivers/md/dm-cache-target.c 	c_region.count = cache->sectors_per_block;
sectors_per_block 1673 drivers/md/dm-cache-target.c 		cache->sectors_per_block;
sectors_per_block 2397 drivers/md/dm-cache-target.c 							   cache->sectors_per_block);
sectors_per_block 2508 drivers/md/dm-cache-target.c 	cache->sectors_per_block = ca->block_size;
sectors_per_block 2509 drivers/md/dm-cache-target.c 	if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
sectors_per_block 2589 drivers/md/dm-cache-target.c 		calculate_discard_block_size(cache->sectors_per_block,
sectors_per_block 2990 drivers/md/dm-cache-target.c 	(void) sector_div(size, cache->sectors_per_block);
sectors_per_block 3195 drivers/md/dm-cache-target.c 		       (unsigned long long)cache->sectors_per_block,
sectors_per_block 3459 drivers/md/dm-cache-target.c 	else if (origin_limits->max_discard_sectors < cache->sectors_per_block)
sectors_per_block 3502 drivers/md/dm-cache-target.c 	if (io_opt_sectors < cache->sectors_per_block ||
sectors_per_block 3503 drivers/md/dm-cache-target.c 	    do_div(io_opt_sectors, cache->sectors_per_block)) {
sectors_per_block 3504 drivers/md/dm-cache-target.c 		blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 3505 drivers/md/dm-cache-target.c 		blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
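The dm-cache-target.c references above are mostly block/sector conversions: multiply a cache block number by sectors_per_block, keep the intra-block offset with a mask when the block size is a power of two (line 825), or with sector_div otherwise, and shift by SECTOR_SHIFT to get bytes. A minimal userspace sketch of that arithmetic follows; the helper names are illustrative, not taken from the driver, and SECTOR_SHIFT is assumed to be 9 as in the kernel.

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9                /* 512-byte sectors, as in the kernel */

    typedef uint64_t sector_t;

    /* Illustrative stand-in for the power-of-two remap seen around line 820:
     * new = block * sectors_per_block + (old & (sectors_per_block - 1)). */
    static sector_t remap_pow2(sector_t bi_sector, sector_t block,
                               sector_t sectors_per_block)
    {
            return (block * sectors_per_block) +
                   (bi_sector & (sectors_per_block - 1));
    }

    /* Generic variant: sector_div()-style modulo for non-power-of-two sizes. */
    static sector_t remap_generic(sector_t bi_sector, sector_t block,
                                  sector_t sectors_per_block)
    {
            return (block * sectors_per_block) + (bi_sector % sectors_per_block);
    }

    int main(void)
    {
            sector_t spb = 128;             /* e.g. 64 KiB cache blocks */
            sector_t s = 1000;
            sector_t block = s / spb;       /* cache block containing the sector */

            printf("block %llu, remapped sector %llu, block bytes %llu\n",
                   (unsigned long long)block,
                   (unsigned long long)remap_pow2(s, block, spb),
                   (unsigned long long)(spb << SECTOR_SHIFT));
            return remap_pow2(s, block, spb) == remap_generic(s, block, spb) ? 0 : 1;
    }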
sectors_per_block 1146 drivers/md/dm-era-target.c 	uint32_t sectors_per_block;
sectors_per_block 1187 drivers/md/dm-era-target.c 		(void) sector_div(block_nr, era->sectors_per_block);
sectors_per_block 1409 drivers/md/dm-era-target.c 	return dm_sector_div_up(era->ti->len, era->sectors_per_block);
sectors_per_block 1457 drivers/md/dm-era-target.c 	r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
sectors_per_block 1464 drivers/md/dm-era-target.c 	r = dm_set_target_max_io_len(ti, era->sectors_per_block);
sectors_per_block 1471 drivers/md/dm-era-target.c 	if (!valid_block_size(era->sectors_per_block)) {
sectors_per_block 1476 drivers/md/dm-era-target.c 	if (era->sectors_per_block & (era->sectors_per_block - 1))
sectors_per_block 1479 drivers/md/dm-era-target.c 		era->sectors_per_block_shift = __ffs(era->sectors_per_block);
sectors_per_block 1481 drivers/md/dm-era-target.c 	md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
sectors_per_block 1629 drivers/md/dm-era-target.c 		DMEMIT("%s %u", buf, era->sectors_per_block);
sectors_per_block 1683 drivers/md/dm-era-target.c 	if (io_opt_sectors < era->sectors_per_block ||
sectors_per_block 1684 drivers/md/dm-era-target.c 	    do_div(io_opt_sectors, era->sectors_per_block)) {
sectors_per_block 1686 drivers/md/dm-era-target.c 		blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
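The dm-era-target.c references validate the block size and, when it is a power of two (line 1476), precompute a shift with __ffs() so block numbers can come from a shift instead of a divide. A sketch of that check under stated assumptions: valid_block_size() is reconstructed here as a plain range check because its body is not shown above, and __builtin_ctz() stands in for the kernel's __ffs().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Assumed stand-in for the driver's valid_block_size(): a range check only. */
    static bool valid_block_size(uint32_t sectors_per_block)
    {
            return sectors_per_block >= 8 && sectors_per_block <= 2048;
    }

    int main(void)
    {
            uint32_t spb = 256;
            int shift = -1;

            if (!valid_block_size(spb))
                    return 1;

            /* Power-of-two test as at dm-era-target.c:1476: x & (x - 1) == 0. */
            if (!(spb & (spb - 1)))
                    shift = __builtin_ctz(spb);   /* userspace analogue of __ffs() */

            sector_t sector = 123456;
            sector_t block = (shift >= 0) ? (sector >> shift) : (sector / spb);

            printf("sectors_per_block=%u shift=%d block=%llu\n",
                   spb, shift, (unsigned long long)block);
            return 0;
    }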
sectors_per_block   96 drivers/md/dm-integrity.c #define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
sectors_per_block  200 drivers/md/dm-integrity.c 	__u8 sectors_per_block;
sectors_per_block  409 drivers/md/dm-integrity.c 	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
sectors_per_block 1061 drivers/md/dm-integrity.c 	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
sectors_per_block 1103 drivers/md/dm-integrity.c 	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
sectors_per_block 1480 drivers/md/dm-integrity.c 	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 1546 drivers/md/dm-integrity.c 				sectors_to_process -= ic->sectors_per_block;
sectors_per_block 1547 drivers/md/dm-integrity.c 				pos += ic->sectors_per_block << SECTOR_SHIFT;
sectors_per_block 1548 drivers/md/dm-integrity.c 				sector += ic->sectors_per_block;
sectors_per_block 1645 drivers/md/dm-integrity.c 	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
sectors_per_block 1647 drivers/md/dm-integrity.c 		      ic->sectors_per_block,
sectors_per_block 1652 drivers/md/dm-integrity.c 	if (ic->sectors_per_block > 1) {
sectors_per_block 1656 drivers/md/dm-integrity.c 			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
sectors_per_block 1658 drivers/md/dm-integrity.c 					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
sectors_per_block 1743 drivers/md/dm-integrity.c 				} while (++s < ic->sectors_per_block);
sectors_per_block 1786 drivers/md/dm-integrity.c 				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 1791 drivers/md/dm-integrity.c 				} while (++s < ic->sectors_per_block);
sectors_per_block 1805 drivers/md/dm-integrity.c 			logical_sector += ic->sectors_per_block;
sectors_per_block 1814 drivers/md/dm-integrity.c 			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
sectors_per_block 1815 drivers/md/dm-integrity.c 		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 1917 drivers/md/dm-integrity.c 			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);
sectors_per_block 1930 drivers/md/dm-integrity.c 				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
sectors_per_block 1952 drivers/md/dm-integrity.c 			dio->range.n_sectors = ic->sectors_per_block;
sectors_per_block 2151 drivers/md/dm-integrity.c 	} while (++s < ic->sectors_per_block);
sectors_per_block 2186 drivers/md/dm-integrity.c 				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
sectors_per_block 2188 drivers/md/dm-integrity.c 					sec &= ~(sector_t)(ic->sectors_per_block - 1);
sectors_per_block 2225 drivers/md/dm-integrity.c 					sec += ic->sectors_per_block;
sectors_per_block 2226 drivers/md/dm-integrity.c 					offset += ic->sectors_per_block;
sectors_per_block 2389 drivers/md/dm-integrity.c 				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
sectors_per_block 2390 drivers/md/dm-integrity.c 			logical_sector += ic->sectors_per_block;
sectors_per_block 2391 drivers/md/dm-integrity.c 			n_sectors -= ic->sectors_per_block;
sectors_per_block 2394 drivers/md/dm-integrity.c 		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
sectors_per_block 2395 drivers/md/dm-integrity.c 				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
sectors_per_block 2396 drivers/md/dm-integrity.c 			n_sectors -= ic->sectors_per_block;
sectors_per_block 2432 drivers/md/dm-integrity.c 	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
sectors_per_block 2940 drivers/md/dm-integrity.c 		arg_count += ic->sectors_per_block != 1;
sectors_per_block 2953 drivers/md/dm-integrity.c 		if (ic->sectors_per_block != 1)
sectors_per_block 2954 drivers/md/dm-integrity.c 			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 2965 drivers/md/dm-integrity.c 			DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
sectors_per_block 3000 drivers/md/dm-integrity.c 	if (ic->sectors_per_block > 1) {
sectors_per_block 3001 drivers/md/dm-integrity.c 		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
sectors_per_block 3002 drivers/md/dm-integrity.c 		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
sectors_per_block 3003 drivers/md/dm-integrity.c 		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 3012 drivers/md/dm-integrity.c 	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
sectors_per_block 3070 drivers/md/dm-integrity.c 	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
sectors_per_block 3100 drivers/md/dm-integrity.c 		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
sectors_per_block 3647 drivers/md/dm-integrity.c 	ic->sectors_per_block = 1;
sectors_per_block 3694 drivers/md/dm-integrity.c 			ic->sectors_per_block = val >> SECTOR_SHIFT;
sectors_per_block 3880 drivers/md/dm-integrity.c 	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
sectors_per_block 4090 drivers/md/dm-integrity.c 		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
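The dm-integrity.c references require every I/O to start and end on an integrity-block boundary; lines 1061, 1103 and 1645 enforce this by OR-ing the start sector and the sector count and masking with sectors_per_block - 1, which is valid only because sectors_per_block is a power of two. A small sketch of that combined check; the function name is illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Combined alignment test as at dm-integrity.c:1645: if either the start
     * sector or the sector count has low bits set, the I/O is misaligned.
     * Only correct when sectors_per_block is a power of two. */
    static bool io_block_aligned(sector_t start, sector_t n_sectors,
                                 unsigned sectors_per_block)
    {
            return ((start | n_sectors) & (sector_t)(sectors_per_block - 1)) == 0;
    }

    int main(void)
    {
            unsigned spb = 8;                               /* 4096-byte blocks */

            printf("%d\n", io_block_aligned(16, 8, spb));   /* aligned: 1 */
            printf("%d\n", io_block_aligned(16, 9, spb));   /* length misaligned: 0 */
            printf("%d\n", io_block_aligned(17, 8, spb));   /* start misaligned: 0 */
            return 0;
    }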
sectors_per_block  239 drivers/md/dm-thin.c 	uint32_t sectors_per_block;
sectors_per_block  375 drivers/md/dm-thin.c 		(b * pool->sectors_per_block);
sectors_per_block  688 drivers/md/dm-thin.c 		(void) sector_div(block_nr, pool->sectors_per_block);
sectors_per_block  703 drivers/md/dm-thin.c 	b += pool->sectors_per_block - 1ull; /* so we round up */
sectors_per_block  709 drivers/md/dm-thin.c 		(void) sector_div(b, pool->sectors_per_block);
sectors_per_block  710 drivers/md/dm-thin.c 		(void) sector_div(e, pool->sectors_per_block);
sectors_per_block  730 drivers/md/dm-thin.c 			(bi_sector & (pool->sectors_per_block - 1));
sectors_per_block  732 drivers/md/dm-thin.c 		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sectors_per_block  733 drivers/md/dm-thin.c 				 sector_div(bi_sector, pool->sectors_per_block);
sectors_per_block 1250 drivers/md/dm-thin.c 		(pool->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 1357 drivers/md/dm-thin.c 		from.sector = data_origin * pool->sectors_per_block;
sectors_per_block 1361 drivers/md/dm-thin.c 		to.sector = data_dest * pool->sectors_per_block;
sectors_per_block 1370 drivers/md/dm-thin.c 		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
sectors_per_block 1373 drivers/md/dm-thin.c 				data_dest * pool->sectors_per_block + len,
sectors_per_block 1374 drivers/md/dm-thin.c 				(data_dest + 1) * pool->sectors_per_block);
sectors_per_block 1387 drivers/md/dm-thin.c 		      tc->pool->sectors_per_block);
sectors_per_block 1413 drivers/md/dm-thin.c 			ll_zero(tc, m, data_block * pool->sectors_per_block,
sectors_per_block 1414 drivers/md/dm-thin.c 				(data_block + 1) * pool->sectors_per_block);
sectors_per_block 1424 drivers/md/dm-thin.c 	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
sectors_per_block 1425 drivers/md/dm-thin.c 	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
sectors_per_block 1430 drivers/md/dm-thin.c 			      pool->sectors_per_block);
sectors_per_block 2869 drivers/md/dm-thin.c 	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
sectors_per_block 2973 drivers/md/dm-thin.c 	pool->sectors_per_block = block_size;
sectors_per_block 3480 drivers/md/dm-thin.c 	(void) sector_div(data_size, pool->sectors_per_block);
sectors_per_block 4042 drivers/md/dm-thin.c 		       (unsigned long)pool->sectors_per_block,
sectors_per_block 4076 drivers/md/dm-thin.c 	if (limits->max_sectors < pool->sectors_per_block) {
sectors_per_block 4077 drivers/md/dm-thin.c 		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
sectors_per_block 4088 drivers/md/dm-thin.c 	if (io_opt_sectors < pool->sectors_per_block ||
sectors_per_block 4089 drivers/md/dm-thin.c 	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
sectors_per_block 4090 drivers/md/dm-thin.c 		if (is_factor(pool->sectors_per_block, limits->max_sectors))
sectors_per_block 4093 drivers/md/dm-thin.c 			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 4094 drivers/md/dm-thin.c 		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
sectors_per_block 4276 drivers/md/dm-thin.c 	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
sectors_per_block 4444 drivers/md/dm-thin.c 			DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
sectors_per_block 4447 drivers/md/dm-thin.c 						tc->pool->sectors_per_block) - 1);
sectors_per_block 4483 drivers/md/dm-thin.c 	(void) sector_div(blocks, pool->sectors_per_block);
sectors_per_block 4485 drivers/md/dm-thin.c 		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
sectors_per_block 4498 drivers/md/dm-thin.c 	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
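The dm-thin.c references round sector counts up to whole pool blocks (add sectors_per_block - 1 before dividing, line 703) and only override io_opt when the stacked value is smaller than, or not a multiple of, the block size (lines 4088-4094). A sketch of both calculations; is_factor(a, b) is assumed to test whether b divides a evenly, which is how the calls above read, and the other names are illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Assumed reading of dm-thin's is_factor(a, b): true when b divides a. */
    static bool is_factor(sector_t a, uint32_t b)
    {
            return !(a % b);
    }

    /* Round a sector count up to whole blocks, as around dm-thin.c:703. */
    static sector_t sectors_to_blocks_round_up(sector_t sectors, uint32_t spb)
    {
            return (sectors + spb - 1) / spb;
    }

    int main(void)
    {
            uint32_t spb = 1024;              /* e.g. 512 KiB thin-pool blocks */
            sector_t io_opt_sectors = 3000;   /* stacked io_opt from the data device */

            printf("blocks for 3000 sectors: %llu\n",
                   (unsigned long long)sectors_to_blocks_round_up(3000, spb));

            /* Override io_opt only when it is smaller than, or not a multiple of,
             * the pool block size (cf. dm-thin.c:4088-4094). */
            if (io_opt_sectors < spb || !is_factor(io_opt_sectors, spb))
                    printf("would set io_opt to %u sectors\n", spb);

            return 0;
    }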
sectors_per_block  377 drivers/mtd/nftlmount.c 	int sectors_per_block;
sectors_per_block  380 drivers/mtd/nftlmount.c 	sectors_per_block = nftl->EraseSize / SECTORSIZE;
sectors_per_block  383 drivers/mtd/nftlmount.c 		for (i = 0; i < sectors_per_block; i++) {
sectors_per_block  153 drivers/mtd/rfd_ftl.c 	int sectors_per_block;
sectors_per_block  158 drivers/mtd/rfd_ftl.c 	sectors_per_block = part->block_size / SECTOR_SIZE;
sectors_per_block  166 drivers/mtd/rfd_ftl.c 			((HEADER_MAP_OFFSET + sectors_per_block) *
sectors_per_block  169 drivers/mtd/rfd_ftl.c 	part->data_sectors_per_block = sectors_per_block -
sectors_per_block  371 drivers/mtd/ssfdc.c 	int sectors_per_block, offset, block_address;
sectors_per_block  373 drivers/mtd/ssfdc.c 	sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT;
sectors_per_block  374 drivers/mtd/ssfdc.c 	offset = (int)(logic_sect_no % sectors_per_block);
sectors_per_block  375 drivers/mtd/ssfdc.c 	block_address = (int)(logic_sect_no / sectors_per_block);
sectors_per_block  378 drivers/mtd/ssfdc.c 		" block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
sectors_per_block  391 drivers/mtd/ssfdc.c 		sect_no = (unsigned long)block_address * sectors_per_block +
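The MTD users above (nftlmount.c, rfd_ftl.c, ssfdc.c) derive sectors_per_block from the erase-block size and then split a logical sector number into an erase-block address and an offset within it (ssfdc.c:374-375), mapping back with block * sectors_per_block + offset (ssfdc.c:391). A small sketch of that round trip; the 16 KiB erase size is illustrative and SECTOR_SIZE is assumed to be 512 as in the drivers.

    #include <stdio.h>

    #define SECTOR_SIZE 512U

    int main(void)
    {
            unsigned erase_size = 16384;                 /* illustrative erase-block size */
            unsigned sectors_per_block = erase_size / SECTOR_SIZE;

            unsigned long logic_sect_no = 1000;
            unsigned offset = logic_sect_no % sectors_per_block;         /* sector within the block */
            unsigned block_address = logic_sect_no / sectors_per_block;  /* which erase block */

            /* Reverse mapping, as at ssfdc.c:391. */
            unsigned long sect_no =
                    (unsigned long)block_address * sectors_per_block + offset;

            printf("spb=%u block=%u offset=%u roundtrip=%lu\n",
                   sectors_per_block, block_address, offset, sect_no);
            return sect_no == logic_sect_no ? 0 : 1;
    }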