Searched refs:chunk_sectors (Results 1 – 12 of 12) sorted by relevance
/linux-4.4.14/drivers/md/
D | raid0.c |
     98  sector_div(sectors, mddev->chunk_sectors);  in create_strip_zones()
     99  rdev1->sectors = sectors * mddev->chunk_sectors;  in create_strip_zones()
    144  if ((mddev->chunk_sectors << 9) % blksize) {  in create_strip_zones()
    147  mddev->chunk_sectors << 9, blksize);  in create_strip_zones()
    311  unsigned int chunk_sects = mddev->chunk_sectors;  in map_sector()
    347  ~(sector_t)(mddev->chunk_sectors-1));  in raid0_size()
    359  if (mddev->chunk_sectors == 0) {  in raid0_run()
    379  blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);  in raid0_run()
    380  blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);  in raid0_run()
    381  blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);  in raid0_run()
    [all …]
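The hits at lines 98-99 round each member device's capacity down to a whole number of chunks before the stripe zones are built, and line 144 checks that the chunk size (shifted left by 9 to get bytes) is a multiple of the logical block size. A minimal user-space sketch of that divide-then-multiply rounding; the device size and chunk size below are illustrative, not values from the kernel:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;   /* 512-byte sectors, as in the kernel */

    /* Round a device's capacity down to a whole number of chunks: the same
     * divide-then-multiply pattern as lines 98-99 in create_strip_zones(). */
    static sector_t round_down_to_chunks(sector_t sectors, unsigned int chunk_sectors)
    {
            sectors /= chunk_sectors;        /* whole chunks that fit */
            return sectors * chunk_sectors;  /* usable, chunk-aligned size */
    }

    int main(void)
    {
            sector_t dev_sectors = 1000005;  /* illustrative device size */
            unsigned int chunk = 1024;       /* 512 KiB chunk = 1024 sectors */

            printf("usable: %llu of %llu sectors\n",
                   (unsigned long long)round_down_to_chunks(dev_sectors, chunk),
                   (unsigned long long)dev_sectors);
            return 0;
    }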
D | linear.c |
    110  if (mddev->chunk_sectors) {  in linear_conf()
    112  sector_div(sectors, mddev->chunk_sectors);  in linear_conf()
    113  rdev->sectors = sectors * mddev->chunk_sectors;  in linear_conf()
    279  seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);  in linear_status()
D | raid5.c |
    770  if (!sector_div(tmp_sec, conf->chunk_sectors))  in stripe_add_to_batch_list()
   2547  : conf->chunk_sectors;  in raid5_compute_sector()
   2743  : conf->chunk_sectors;  in raid5_compute_blocknr()
   3058  previous ? conf->prev_chunk_sectors : conf->chunk_sectors;  in stripe_set_idx()
   4697  unsigned int chunk_sectors;  in in_chunk_boundary() local
   4700  chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);  in in_chunk_boundary()
   4701  return chunk_sectors >=  in in_chunk_boundary()
   4702  ((sector & (chunk_sectors - 1)) + bio_sectors);  in in_chunk_boundary()
   4872  unsigned chunk_sects = mddev->chunk_sectors;  in chunk_aligned_read()
   5069  stripe_sectors = conf->chunk_sectors *  in make_discard_request()
    [all …]
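The in_chunk_boundary() hits at lines 4697-4702 decide whether a read of bio_sectors starting at sector stays inside a single chunk, masking the start sector with chunk_sectors - 1; that only works because chunk_sectors is a power of two. A stand-alone sketch of the same check, with made-up sector and chunk values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* True if [sector, sector + bio_sectors) lies within a single chunk.
     * chunk_sectors must be a power of two for the mask trick to be valid. */
    static bool in_chunk_boundary(sector_t sector, unsigned int bio_sectors,
                                  unsigned int chunk_sectors)
    {
            unsigned int offset_in_chunk = sector & (chunk_sectors - 1);

            return chunk_sectors >= offset_in_chunk + bio_sectors;
    }

    int main(void)
    {
            /* 128-sector (64 KiB) chunks; both cases are illustrative */
            printf("%d\n", in_chunk_boundary(1000, 24, 128));  /* fits: 1 */
            printf("%d\n", in_chunk_boundary(1000, 64, 128));  /* crosses: 0 */
            return 0;
    }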
D | dm-raid.c |
    360  if (region_size < rs->md.chunk_sectors) {  in validate_region_size()
    525  rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;  in parse_raid_params()
    692  if (rs->md.chunk_sectors)  in parse_raid_params()
    693  max_io_len = rs->md.chunk_sectors;  in parse_raid_params()
    838  sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);  in super_sync()
    937  if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {  in super_init_validation()
   1474  raid_param_cnt, rs->md.chunk_sectors);  in raid_status()
   1617  unsigned chunk_size = rs->md.chunk_sectors << 9;  in raid_io_hints()
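dm-raid keeps the same unit convention as md: chunk_sectors counts 512-byte sectors, so raid_io_hints() at line 1617 shifts left by 9 to get bytes, and validate_region_size() at line 360 rejects a region smaller than one chunk. A quick sketch of both steps, with hypothetical values:

    #include <stdio.h>

    #define SECTOR_SHIFT 9   /* 512-byte sectors */

    int main(void)
    {
            unsigned int chunk_sectors = 128;                       /* illustrative */
            unsigned int chunk_bytes = chunk_sectors << SECTOR_SHIFT;
            unsigned int region_size = 64;                          /* illustrative, in sectors */

            printf("chunk: %u sectors = %u bytes\n", chunk_sectors, chunk_bytes);

            /* validate_region_size() refuses regions smaller than one chunk */
            if (region_size < chunk_sectors)
                    printf("region size %u rejected: smaller than one chunk\n",
                           region_size);
            return 0;
    }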
D | raid5.h |
    440  int chunk_sectors;  member
D | md.c |
   1062  mddev->chunk_sectors = sb->chunk_size >> 9;  in super_90_validate()
   1091  mddev->new_chunk_sectors = mddev->chunk_sectors;  in super_90_validate()
   1237  sb->chunk_size = mddev->chunk_sectors << 9;  in super_90_sync()
   1549  mddev->chunk_sectors = le32_to_cpu(sb->chunksize);  in super_1_validate()
   1607  mddev->new_chunk_sectors = mddev->chunk_sectors;  in super_1_validate()
   1712  sb->chunksize = cpu_to_le32(mddev->chunk_sectors);  in super_1_sync()
   2278  (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))  in does_sb_need_changing()
   3520  mddev->new_chunk_sectors = mddev->chunk_sectors;  in level_store()
   3543  mddev->chunk_sectors = mddev->new_chunk_sectors;  in level_store()
   3735  mddev->chunk_sectors != mddev->new_chunk_sectors)  in chunk_size_show()
    [all …]
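The on-disk superblock fields (sb->chunk_size, sb->chunksize) hold the chunk size in bytes while struct mddev holds it in 512-byte sectors, so the validate path shifts right by 9 (line 1062) and the sync path shifts left by 9 (line 1237). A small sketch of that round trip under an assumed 512 KiB chunk:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9

    int main(void)
    {
            uint32_t sb_chunk_size = 512 * 1024;                     /* bytes, as stored on disk */
            uint32_t chunk_sectors = sb_chunk_size >> SECTOR_SHIFT;  /* what mddev keeps */
            uint32_t back_to_bytes = chunk_sectors << SECTOR_SHIFT;  /* what the sync path writes */

            printf("%u bytes -> %u sectors -> %u bytes\n",
                   sb_chunk_size, chunk_sectors, back_to_bytes);
            return 0;
    }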
D | md.h |
    262  int chunk_sectors;  member
D | raid10.c |
   1493  seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);  in status()
   3391  chunk = mddev->chunk_sectors;  in setup_geo()
   3553  chunk_size = mddev->chunk_sectors << 9;  in run()
   3556  mddev->chunk_sectors);  in run()
   3675  ((mddev->chunk_sectors << 9) / PAGE_SIZE);  in run()
   3814  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid10_takeover_raid0()
   4475  ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);  in end_reshape()
   4612  mddev->chunk_sectors = 1 << conf->geo.chunk_shift;  in raid10_finish_reshape()
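raid10 derives several values from the one field: status() at line 1493 reports chunk_sectors / 2 as KiB (a sector is 512 bytes), run() at line 3675 works out how many pages one chunk spans, and raid10_finish_reshape() at line 4612 rebuilds chunk_sectors from a power-of-two chunk_shift. A sketch of those conversions assuming a 4 KiB PAGE_SIZE and an illustrative chunk_shift:

    #include <stdio.h>

    #define SECTOR_SHIFT 9
    #define PAGE_SIZE 4096u   /* assumed; architecture-dependent in the kernel */

    int main(void)
    {
            unsigned int chunk_shift = 7;                    /* illustrative */
            unsigned int chunk_sectors = 1u << chunk_shift;  /* 128 sectors */

            printf("%uK chunks\n", chunk_sectors / 2);       /* status()-style report */
            printf("%u pages per chunk\n",
                   (chunk_sectors << SECTOR_SHIFT) / PAGE_SIZE);
            return 0;
    }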
D | raid1.c |
   3048  if (mddev->chunk_sectors != mddev->new_chunk_sectors ||  in raid1_reshape()
   3051  mddev->new_chunk_sectors = mddev->chunk_sectors;  in raid1_reshape()
/linux-4.4.14/drivers/char/
D | ps3flash.c |
     38  u64 chunk_sectors;  member
     50  start_sector, priv->chunk_sectors,  in ps3flash_read_write_sectors()
    130  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_read()
    163  sector += priv->chunk_sectors;  in ps3flash_read()
    199  sector = *pos / dev->bounce_size * priv->chunk_sectors;  in ps3flash_write()
    238  sector += priv->chunk_sectors;  in ps3flash_write()
    388  priv->chunk_sectors = dev->bounce_size / dev->blk_size;  in ps3flash_probe()
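The ps3flash hit is unrelated to MD striping: here chunk_sectors is simply how many device blocks fit in one bounce buffer (line 388), and the read/write paths at lines 130 and 199 map a byte offset to the starting sector of the containing bounce-buffer-sized chunk. A sketch of that mapping with assumed buffer and block sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bounce_size = 256 * 1024;   /* assumed bounce buffer size in bytes */
            uint64_t blk_size = 512;             /* assumed device block size in bytes */
            uint64_t chunk_sectors = bounce_size / blk_size;

            uint64_t pos = 1048676;              /* illustrative byte offset into the flash */
            uint64_t sector = pos / bounce_size * chunk_sectors;

            printf("offset %llu starts in the chunk at sector %llu (%llu sectors per chunk)\n",
                   (unsigned long long)pos, (unsigned long long)sector,
                   (unsigned long long)chunk_sectors);
            return 0;
    }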
/linux-4.4.14/block/
D | blk-settings.c |
     96  lim->chunk_sectors = 0;  in blk_set_default_limits()
    268  void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)  in blk_queue_chunk_sectors() argument
    270  BUG_ON(!is_power_of_2(chunk_sectors));  in blk_queue_chunk_sectors()
    271  q->limits.chunk_sectors = chunk_sectors;  in blk_queue_chunk_sectors()
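blk_queue_chunk_sectors() insists on a power-of-two value (the BUG_ON at line 270) so that blk_max_size_offset() in blkdev.h below can use mask arithmetic instead of a division. A user-space sketch of the same single-bit test, using a hypothetical is_pow2() helper in place of the kernel's is_power_of_2():

    #include <stdbool.h>
    #include <stdio.h>

    /* Same idea as the kernel's is_power_of_2(): non-zero with exactly one bit set. */
    static bool is_pow2(unsigned int n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    int main(void)
    {
            unsigned int candidates[] = { 8, 128, 0, 96 };   /* illustrative values */
            unsigned int i;

            for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
                    printf("%u -> %s\n", candidates[i],
                           is_pow2(candidates[i]) ? "accepted" : "rejected");
            return 0;
    }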
/linux-4.4.14/include/linux/
D | blkdev.h |
    258  unsigned int chunk_sectors;  member
    882  if (!q->limits.chunk_sectors)  in blk_max_size_offset()
    885  return q->limits.chunk_sectors -  in blk_max_size_offset()
    886  (offset & (q->limits.chunk_sectors - 1));  in blk_max_size_offset()
    896  if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))  in blk_rq_get_max_sectors()
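blk_max_size_offset() at lines 885-886 returns the number of sectors left before the next chunk boundary, computed as chunk_sectors minus the offset within the current chunk, again relying on the power-of-two mask. A stand-alone sketch of that arithmetic; the zero return for "no chunk limit" and the example offsets are illustrative, not the kernel's exact behaviour:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Sectors left until the next chunk boundary at 'offset'.
     * Returns 0 here to mean "no chunk limit"; chunk_sectors must be a power of two. */
    static unsigned int max_size_at_offset(sector_t offset, unsigned int chunk_sectors)
    {
            if (!chunk_sectors)
                    return 0;
            return chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
    }

    int main(void)
    {
            /* 128-sector chunks; offsets are illustrative */
            printf("%u\n", max_size_at_offset(0, 128));     /* 128: full chunk ahead */
            printf("%u\n", max_size_at_offset(100, 128));   /*  28: near a boundary  */
            return 0;
    }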