/linux-4.4.14/fs/nfs/blocklayout/
D | blocklayout.c |
      157 disk_addr = (u64)isect << SECTOR_SHIFT;   in do_add_page_to_bio()
      174 disk_addr >> SECTOR_SHIFT, end_io, par);   in do_add_page_to_bio()
      250 isect = (sector_t) (f_offset >> SECTOR_SHIFT);   in bl_read_pagelist()
      296 isect += (pg_len >> SECTOR_SHIFT);   in bl_read_pagelist()
      297 extent_length -= (pg_len >> SECTOR_SHIFT);   in bl_read_pagelist()
      302 if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {   in bl_read_pagelist()
      306 header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;   in bl_read_pagelist()
      346 ext_tree_mark_written(bl, start >> SECTOR_SHIFT,   in bl_write_cleanup()
      347 (end - start) >> SECTOR_SHIFT);   in bl_write_cleanup()
      396 isect = offset >> SECTOR_SHIFT;   in bl_write_pagelist()
      [all …]
|
D | blocklayout.h |
      43 #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
      44 #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
      45 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
|
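Throughout these hits the arithmetic is the same: with a 512-byte sector (SECTOR_SHIFT = 9), byte offsets become sector numbers by shifting right, sector counts become byte lengths by shifting left, and page-sized quantities differ from sectors by PAGE_SHIFT - SECTOR_SHIFT. A minimal user-space sketch of that arithmetic, not kernel code; the 4096-byte page size is an assumption for the example:

/* Minimal sketch of the SECTOR_SHIFT arithmetic used in the hits above.
 * The 4 KiB page size is assumed purely for illustration. */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT        9
#define SECTOR_SIZE         (1u << SECTOR_SHIFT)                /* 512 bytes  */
#define PAGE_SIZE_EXAMPLE   4096u                               /* assumption */
#define PAGE_SECTORS        (PAGE_SIZE_EXAMPLE >> SECTOR_SHIFT) /* 8 sectors  */

int main(void)
{
	uint64_t byte_offset = 1536;                         /* arbitrary example */
	uint64_t sector      = byte_offset >> SECTOR_SHIFT;  /* bytes -> sectors  */
	uint64_t bytes_back  = sector << SECTOR_SHIFT;       /* sectors -> bytes  */

	printf("offset %llu B = sector %llu (back to %llu B), %u sectors/page\n",
	       (unsigned long long)byte_offset,
	       (unsigned long long)sector,
	       (unsigned long long)bytes_back,
	       PAGE_SECTORS);
	return 0;
}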
D | extent_tree.c |
      506 p = xdr_encode_hyper(p, be->be_f_offset << SECTOR_SHIFT);   in ext_tree_encode_commit()
      507 p = xdr_encode_hyper(p, be->be_length << SECTOR_SHIFT);   in ext_tree_encode_commit()
|
/linux-4.4.14/drivers/block/zram/ |
D | zram_drv.h |
      39 #define SECTOR_SHIFT 9   macro
      40 #define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
      45 (1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
|
D | zram_drv.c |
      128 end = start + (size >> SECTOR_SHIFT);   in valid_io_request()
      129 bound = zram->disksize >> SECTOR_SHIFT;   in valid_io_request()
      818 generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,   in zram_bvec_rw()
      850 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;   in __zram_make_request()
      956 offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;   in zram_rw_page()
      1061 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);   in disksize_store()
|
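The zram hits combine two idioms: a request is in range only if its end sector, start + (size >> SECTOR_SHIFT), stays within disksize >> SECTOR_SHIFT, and the byte offset of a sector inside its page is recovered as (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT. A hedged user-space sketch of both, with illustrative names and sizes rather than the actual zram code:

/* Sketch of a zram-style bound check and in-page offset computation.
 * All values are illustrative; this is not the zram implementation. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SECTOR_SHIFT      9
#define PAGE_SHIFT_EX     12                                  /* assume 4 KiB pages */
#define SECTORS_PER_PAGE  (1u << (PAGE_SHIFT_EX - SECTOR_SHIFT))

static bool io_in_range(uint64_t disksize_bytes,
			uint64_t start_sector, uint32_t size_bytes)
{
	uint64_t end   = start_sector + (size_bytes >> SECTOR_SHIFT);
	uint64_t bound = disksize_bytes >> SECTOR_SHIFT;

	/* reject requests that run past the end of the device */
	return start_sector < bound && end <= bound;
}

int main(void)
{
	uint64_t sector = 13;
	/* byte offset of this 512 B sector inside its 4 KiB page */
	uint32_t offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	printf("offset in page: %u, request in range: %d\n",
	       offset, io_in_range(1 << 20, sector, 4096));
	return 0;
}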
/linux-4.4.14/drivers/block/ |
D | brd.c |
      25 #define SECTOR_SHIFT 9   macro
      26 #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
      198 unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;   in copy_to_brd_setup()
      205 sector += copy >> SECTOR_SHIFT;   in copy_to_brd_setup()
      225 sector += PAGE_SIZE >> SECTOR_SHIFT;   in discard_from_brd()
      238 unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;   in copy_to_brd()
      251 sector += copy >> SECTOR_SHIFT;   in copy_to_brd()
      270 unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;   in copy_from_brd()
      284 sector += copy >> SECTOR_SHIFT;   in copy_from_brd()
      340 if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||   in brd_make_request()
      [all …]
|
D | rbd.c |
      58 #define SECTOR_SHIFT 9   macro
      59 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
      881 if (ondisk->options.order < SECTOR_SHIFT)   in rbd_dev_ondisk_valid()
      2471 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);   in rbd_img_request_fill()
      3363 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;   in rbd_queue_workfn()
|
/linux-4.4.14/drivers/mtd/ |
D | ssfdc.c |
      39 #define SECTOR_SHIFT 9   macro
      136 cis_sector = (int)(offset >> SECTOR_SHIFT);   in get_valid_cis_sector()
      157 loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;   in read_physical_sector()
      311 ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT);   in ssfdcr_add_mtd()
      323 ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /   in ssfdcr_add_mtd()
      375 sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT;   in ssfdcr_readsect()
|
/linux-4.4.14/arch/xtensa/platforms/iss/ |
D | simdisk.c |
      24 #define SECTOR_SHIFT 9   macro
      75 unsigned long offset = sector << SECTOR_SHIFT;   in simdisk_transfer()
      76 unsigned long nbytes = nsect << SECTOR_SHIFT;   in simdisk_transfer()
      113 unsigned len = bvec.bv_len >> SECTOR_SHIFT;   in simdisk_make_request()
      175 set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);   in simdisk_attach()
|
/linux-4.4.14/drivers/md/ |
D | dm-verity.c |
      552 ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {   in verity_map()
      558 (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {   in verity_map()
      570 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);   in verity_map()
      642 ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)   in verity_prepare_ioctl()
      786 (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))   in verity_ctr()
      787 >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {   in verity_ctr()
      794 if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {   in verity_ctr()
      801 (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))   in verity_ctr()
      802 >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {   in verity_ctr()
|
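The verity_ctr() hits show the round-trip idiom for catching overflow: a block count shifted up into sectors and back must reproduce the original value, otherwise the table line asked for more blocks than the sector type can address. A small sketch of that check, with sector_t stood in by a 32-bit type so the failure case is easy to trigger; purely illustrative, not the dm-verity code:

/* Round-trip overflow check, in the spirit of the verity_ctr() hits.
 * sector_t is stood in by uint32_t so the overflow is demonstrable;
 * the kernel type is usually 64-bit. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SECTOR_SHIFT 9

typedef uint32_t example_sector_t;   /* stand-in, for illustration only */

static bool blocks_fit(uint64_t num_blocks, unsigned block_bits)
{
	unsigned shift = block_bits - SECTOR_SHIFT;   /* block size in sectors, as a shift */
	example_sector_t sectors = (example_sector_t)(num_blocks << shift);

	/* If shifting up and back down loses bits, the block count
	 * does not fit in the sector type. */
	return (uint64_t)(sectors >> shift) == num_blocks;
}

int main(void)
{
	/* 4096-byte blocks: block_bits = 12, so shift = 3 */
	printf("small count fits: %d\n", blocks_fit(1000, 12));
	printf("huge count fits:  %d\n", blocks_fit(1ULL << 40, 12));
	return 0;
}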
D | dm-snap-persistent.c |
      172 len = ps->store->chunk_size << SECTOR_SHIFT;   in alloc_area()
      300 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);   in zero_memory_area()
      390 memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);   in write_header()
      501 ps->store->chunk_size << SECTOR_SHIFT,   in read_exceptions()
      546 memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);   in read_exceptions()
      627 ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /   in persistent_read_metadata()
|
D | dm-log-writes.c |
      302 sector += block->vecs[i].bv_len >> SECTOR_SHIFT;   in log_one_block()
      335 return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;   in logdev_last_sector()
      439 lc->sectorsize = 1 << SECTOR_SHIFT;   in log_writes_ctr()
      727 if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)   in log_writes_prepare_ioctl()
      770 limits->discard_granularity = 1 << SECTOR_SHIFT;   in log_writes_io_hints()
      771 limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);   in log_writes_io_hints()
|
D | dm-io.c |
      319 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));   in do_region()
      329 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;   in do_region()
      338 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;   in do_region()
      515 r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);   in dm_io()
|
D | dm-cache-metadata.h | 27 #define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
|
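For reference, 16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT) is 16 * 2097152 = 33554432 sectors, i.e. the warning threshold corresponds to a 16 GiB metadata device; the THIN_METADATA_MAX_SECTORS_WARNING constant further down uses the identical arithmetic.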
D | dm-bufio.c |
      156 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
      157 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
      565 .count = b->c->block_size >> SECTOR_SHIFT,   in use_dmio()
      924 (c->sectors_per_block_bits + SECTOR_SHIFT);   in __get_memory_limit()
      1433 (SECTOR_SHIFT + c->sectors_per_block_bits);   in dm_bufio_get_device_size()
      1589 BUG_ON(block_size < 1 << SECTOR_SHIFT ||   in dm_bufio_client_create()
      1601 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;   in dm_bufio_client_create()
|
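Line 1601 relies on block_size being a power of two: __ffs() then returns log2(block_size), so subtracting SECTOR_SHIFT yields the block size expressed as a sector-count shift. A user-space sketch of the same derivation, using __builtin_ctzl() as a stand-in for the kernel's __ffs() (both give the index of the lowest set bit of a non-zero power of two):

/* Sketch of the dm-bufio shift derivation: for a power-of-two block
 * size, log2(block_size) - SECTOR_SHIFT is the per-block sector shift.
 * __builtin_ctzl() stands in for the kernel's __ffs(). */
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned long block_size = 65536;                    /* 64 KiB, power of two */
	unsigned sectors_per_block_bits =
		__builtin_ctzl(block_size) - SECTOR_SHIFT;   /* 16 - 9 = 7 */

	/* block size in sectors = 1 << sectors_per_block_bits = 128 */
	printf("%lu-byte blocks = %lu sectors per block\n",
	       block_size, 1UL << sectors_per_block_bits);
	return 0;
}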
D | dm-exception-store.h | 194 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; in get_dev_size()
|
D | dm-linear.c | 131 ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT) in linear_prepare_ioctl()
|
D | dm-thin-metadata.h | 24 #define THIN_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
|
D | dm-table.c |
      284 i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;   in device_area_is_invalid()
      286 limits->logical_block_size >> SECTOR_SHIFT;   in device_area_is_invalid()
      453 (unsigned long long) start << SECTOR_SHIFT);   in dm_set_device_limits()
      611 limits->logical_block_size >> SECTOR_SHIFT;   in validate_hardware_logical_block_alignment()
      647 SECTOR_SHIFT) - 1))   in validate_hardware_logical_block_alignment()
|
D | dm-log.c |
      444 dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,   in create_log_context()
      455 lc->header_location.count = buf_size >> SECTOR_SHIFT;   in create_log_context()
      477 (LOG_OFFSET << SECTOR_SHIFT);   in create_log_context()
|
D | dm-exception-store.c | 179 if (chunk_size > INT_MAX >> SECTOR_SHIFT) { in dm_exception_store_set_chunk_size()
|
D | dm-era-target.c |
      501 disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);   in prepare_superblock()
      1616 (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),   in era_status()
      1665 return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;   in get_dev_size()
      1678 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;   in era_io_hints()
      1687 blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);   in era_io_hints()
|
D | dm-crypt.c |
      711 for (i = 0; i < ((1 << SECTOR_SHIFT) / 8); i++)   in crypt_iv_tcw_whitening()
      853 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,   in crypt_convert_block()
      857 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,   in crypt_convert_block()
      860 bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);   in crypt_convert_block()
      861 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);   in crypt_convert_block()
      870 1 << SECTOR_SHIFT, iv);   in crypt_convert_block()
|
D | dm-thin.c |
      44 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
      45 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
      656 sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);   in get_bio_block_range()
      1092 (pool->sectors_per_block << SECTOR_SHIFT);   in io_overlaps_block()
      1798 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;   in process_cell()
      3021 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;   in get_dev_size()
      3829 sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;   in pool_io_hints()
      3855 blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);   in pool_io_hints()
      3857 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);   in pool_io_hints()
      3858 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);   in pool_io_hints()
      [all …]
|
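Worked out, dm-thin's block-size bounds are 64 * 1024 >> SECTOR_SHIFT = 128 sectors (64 KiB) and 1024 * 1024 * 1024 >> SECTOR_SHIFT = 2097152 sectors (1 GiB); the dm-cache-target entry that follows uses the same pattern with a 32 KiB (64-sector) minimum.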
D | dm-cache-target.c |
      150 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
      151 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
      1325 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));   in bio_writes_complete_block()
      2368 return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;   in get_dev_size()
      3796 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;   in set_discard_limits()
      3802 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;   in cache_io_hints()
      3810 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);   in cache_io_hints()
      3811 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);   in cache_io_hints()
|
D | dm-flakey.c | 387 ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT) in flakey_prepare_ioctl()
|
D | dm-stripe.c | 411 unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT; in stripe_io_hints()
|
D | dm-switch.c | 529 i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT) in switch_prepare_ioctl()
|
D | dm-cache-metadata.c |
      311 sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;   in __write_initial_superblock()
      499 cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,   in __create_persistent_data_objects()
|
D | dm-kcopyd.c | 34 #define RESERVE_PAGES (DIV_ROUND_UP(SUB_JOB_SIZE << SECTOR_SHIFT, PAGE_SIZE))
|
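RESERVE_PAGES converts a sector count into the number of pages needed to back it, rounding up. A small sketch of the same round-up; the SUB_JOB_SIZE value and the 4 KiB page size here are assumptions for illustration, not values taken from dm-kcopyd:

/* Round a sector count up to whole pages, as RESERVE_PAGES does.
 * SUB_JOB_SIZE_EXAMPLE and the page size are illustrative assumptions. */
#include <stdio.h>

#define SECTOR_SHIFT          9
#define PAGE_SIZE_EXAMPLE     4096u
#define SUB_JOB_SIZE_EXAMPLE  128u   /* sectors; illustrative value */

#define DIV_ROUND_UP(n, d)    (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned pages = DIV_ROUND_UP(SUB_JOB_SIZE_EXAMPLE << SECTOR_SHIFT,
				      PAGE_SIZE_EXAMPLE);

	/* 128 sectors = 65536 bytes = 16 pages of 4 KiB */
	printf("%u sectors need %u pages\n", SUB_JOB_SIZE_EXAMPLE, pages);
	return 0;
}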
D | dm-thin-metadata.c |
      479 sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;   in __write_initial_superblock()
      688 pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,   in __create_persistent_data_objects()
|
D | dm.c |
      1461 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;   in dm_accept_partial_bio()
      1466 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;   in dm_accept_partial_bio()
      2472 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);   in __set_size()
|
D | dm-mpath.c | 1582 if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT) in multipath_prepare_ioctl()
|
D | dm-snap.c | 1758 (s->store->chunk_size << SECTOR_SHIFT)) { in snapshot_map()
|
/linux-4.4.14/drivers/md/persistent-data/ |
D | dm-space-map-metadata.h | 12 #define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)
|
/linux-4.4.14/drivers/nvdimm/ |
D | blk.c |
      139 lba = div_u64(sector << SECTOR_SHIFT, blk_dev->sector_size);   in nd_blk_do_bvec()
      158 sector += blk_dev->sector_size >> SECTOR_SHIFT;   in nd_blk_do_bvec()
      288 set_capacity(disk, available_disk_size >> SECTOR_SHIFT);   in nd_blk_attach_disk()
|
D | nd.h | 30 SECTOR_SHIFT = 9, enumerator
|
D | btt.c |
      834 __u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);   in lba_to_arena()
      1036 sector += btt->sector_size >> SECTOR_SHIFT;   in btt_read_pg()
      1124 sector += btt->sector_size >> SECTOR_SHIFT;   in btt_write_pg()
|
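Both nvdimm entries handle devices whose native sector size need not be 512 bytes: the block layer's 512-byte sector index is first turned into a byte offset (<< SECTOR_SHIFT) and then divided by the device sector size to get an LBA, and the loops advance by sector_size >> SECTOR_SHIFT per native block. A hedged user-space sketch of that translation, with an assumed 4096-byte native sector size:

/* Sketch of the nvdimm-style LBA translation: block-layer sectors are
 * always 512 bytes, the device sector size may be larger (e.g. 4096). */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint32_t dev_sector_size = 4096;          /* assumed native sector size */
	uint64_t sector = 24;                     /* block-layer 512 B sector   */

	/* byte offset, then device LBA */
	uint64_t lba  = (sector << SECTOR_SHIFT) / dev_sector_size;
	/* how far the 512 B sector index advances per native block */
	uint64_t step = dev_sector_size >> SECTOR_SHIFT;

	printf("512B sector %llu -> LBA %llu (advance %llu sectors per block)\n",
	       (unsigned long long)sector,
	       (unsigned long long)lba,
	       (unsigned long long)step);
	return 0;
}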
/linux-4.4.14/include/linux/ |
D | device-mapper.h |
      546 #define SECTOR_SHIFT 9   macro
      598 return (n >> SECTOR_SHIFT);   in to_sector()
      603 return (n << SECTOR_SHIFT);   in to_bytes()
|
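The last two hits are device-mapper's canonical byte/sector helpers. A standalone restatement of the same arithmetic, with sector_t stood in by a 64-bit type for the example:

/* Standalone restatement of device-mapper's to_sector()/to_bytes():
 * plain shifts by SECTOR_SHIFT in each direction. */
#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

typedef uint64_t sector_t_example;   /* stand-in for the kernel's sector_t */

static sector_t_example to_sector(uint64_t n)   /* bytes -> sectors */
{
	return n >> SECTOR_SHIFT;
}

static uint64_t to_bytes(sector_t_example n)    /* sectors -> bytes */
{
	return n << SECTOR_SHIFT;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)to_sector(1 << 20));  /* 2048    */
	printf("%llu\n", (unsigned long long)to_bytes(2048));      /* 1048576 */
	return 0;
}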