Lines matching refs:chunk_sectors in drivers/md/raid5.c

770 	if (!sector_div(tmp_sec, conf->chunk_sectors))  in stripe_add_to_batch_list()
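
The sector_div() seen here (and again in run() below at lines 6766 and 6774) is the kernel helper that divides a sector_t in place and hands back the remainder, so !sector_div(tmp_sec, conf->chunk_sectors) asks whether tmp_sec sits exactly on a chunk boundary, leaving the chunk number behind in tmp_sec. A minimal userspace sketch of that idiom (sketch_sector_div is a hypothetical stand-in; the real helper is a macro that acts on the variable itself, not a pointer):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Hypothetical stand-in for sector_div(): divide in place,
     * return the remainder. */
    static unsigned int sketch_sector_div(sector_t *n, unsigned int base)
    {
        unsigned int rem = (unsigned int)(*n % base);

        *n /= base;
        return rem;
    }

    int main(void)
    {
        sector_t tmp_sec = 1024;          /* logical sector of a stripe head */
        unsigned int chunk_sectors = 128; /* 64 KiB chunks */

        /* !sector_div(...) is true when the remainder is 0, i.e. the
         * sector sits exactly on a chunk boundary. */
        if (!sketch_sector_div(&tmp_sec, chunk_sectors))
            printf("chunk-aligned, chunk number %llu\n",
                   (unsigned long long)tmp_sec);
        return 0;
    }
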
2547 : conf->chunk_sectors; in raid5_compute_sector()
2743 : conf->chunk_sectors; in raid5_compute_blocknr()
3058 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; in stripe_set_idx()
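
raid5_compute_sector() and raid5_compute_blocknr() use chunk_sectors as the sectors-per-chunk granularity when translating between array sectors and per-device sectors, and stripe_set_idx() picks prev_chunk_sectors when it is working on the pre-reshape geometry. A rough sketch of the forward mapping, ignoring parity rotation and the layout variants the real function handles (an assumed simplification, not the kernel code):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Sketch: map an array sector to (data-device index, device sector)
     * for a plain left-to-right stripe. */
    static sector_t compute_dev_sector(sector_t r_sector,
                                       unsigned int chunk_sectors,
                                       int data_disks, int *dd_idx)
    {
        sector_t chunk_number = r_sector / chunk_sectors;
        unsigned int chunk_offset = r_sector % chunk_sectors;
        sector_t stripe = chunk_number / data_disks;

        *dd_idx = chunk_number % data_disks;          /* which data device */
        return stripe * chunk_sectors + chunk_offset; /* sector on that device */
    }

    int main(void)
    {
        int dd_idx;
        sector_t s = compute_dev_sector(1000, 128, 3, &dd_idx);

        printf("array sector 1000 -> data disk %d, sector %llu\n",
               dd_idx, (unsigned long long)s);
        return 0;
    }
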
4697 unsigned int chunk_sectors; in in_chunk_boundary() local
4700 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); in in_chunk_boundary()
4701 return chunk_sectors >= in in_chunk_boundary()
4702 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
4872 unsigned chunk_sects = mddev->chunk_sectors; in chunk_aligned_read()
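
in_chunk_boundary() and chunk_aligned_read() both ask whether a read stays inside a single chunk; because md requires a power-of-two chunk size for raid4/5/6, (sector & (chunk_sectors - 1)) is the offset within the chunk, and the request fits iff that offset plus its length does not exceed chunk_sectors. A stand-alone sketch of the same test:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Sketch of the chunk-boundary test: does a request of bio_sectors
     * starting at 'sector' stay inside one chunk?  Valid only because
     * chunk_sectors is a power of two. */
    static bool fits_in_chunk(sector_t sector, unsigned int bio_sectors,
                              unsigned int chunk_sectors)
    {
        unsigned int offset = sector & (chunk_sectors - 1);

        return chunk_sectors >= offset + bio_sectors;
    }

    int main(void)
    {
        /* 128-sector (64 KiB) chunks: a 16-sector read at offset 120
         * crosses into the next chunk and would have to be split. */
        printf("%d\n", fits_in_chunk(120, 16, 128)); /* 0: crosses */
        printf("%d\n", fits_in_chunk(64, 16, 128));  /* 1: fits */
        return 0;
    }
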
5069 stripe_sectors = conf->chunk_sectors * in make_discard_request()
5075 logical_sector *= conf->chunk_sectors; in make_discard_request()
5076 last_sector *= conf->chunk_sectors; in make_discard_request()
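
make_discard_request() only discards whole stripes: it first works in units of stripe_sectors (chunk_sectors times the number of data disks), rounding the start of the range up and the end down, and then multiplies back by chunk_sectors to get per-device offsets. A rough userspace sketch of that rounding (assumed simplification; parity and degraded handling omitted):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Sketch: clamp a discard range [logical_sector, last_sector) to
     * whole stripes, then express both ends in per-device sectors
     * (one chunk per stripe on each data device). */
    int main(void)
    {
        unsigned int chunk_sectors = 128;
        int data_disks = 4;              /* raid_disks - max_degraded */
        sector_t stripe_sectors = (sector_t)chunk_sectors * data_disks;

        sector_t logical_sector = 700, last_sector = 2000;

        /* round the start up and the end down to full stripes */
        logical_sector = (logical_sector + stripe_sectors - 1) / stripe_sectors;
        last_sector /= stripe_sectors;

        /* convert stripe numbers to per-device sector offsets */
        logical_sector *= chunk_sectors;
        last_sector *= chunk_sectors;

        printf("discard per-device sectors [%llu, %llu)\n",
               (unsigned long long)logical_sector,
               (unsigned long long)last_sector);
        return 0;
    }
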
5385 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors); in reshape_request()
6296 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_size()
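
raid5_size() here, raid5_resize() at line 7287, and the dev_sectors adjustment in run() at line 6901 all round a sector count down to a whole number of chunks with a power-of-two mask. The idiom on its own:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Sketch: round a sector count down to a multiple of chunk_sectors.
     * The mask works only because chunk_sectors is a power of two. */
    int main(void)
    {
        unsigned int chunk_sectors = 128;
        sector_t sectors = 1000000;

        sectors &= ~((sector_t)chunk_sectors - 1);
        printf("%llu\n", (unsigned long long)sectors); /* 999936 */
        return 0;
    }
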
6317 max(conf->chunk_sectors, in alloc_scratch_buffer()
6423 conf->scribble_sectors = max(conf->chunk_sectors, in raid5_alloc_percpu()
6563 conf->chunk_sectors = mddev->new_chunk_sectors; in setup_conf()
6610 conf->prev_chunk_sectors = mddev->chunk_sectors; in setup_conf()
6613 conf->prev_chunk_sectors = conf->chunk_sectors; in setup_conf()
6740 int chunk_sectors; in run() local
6764 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors); in run()
6766 if (sector_div(here_new, chunk_sectors * new_data_disks)) { in run()
6771 reshape_offset = here_new * chunk_sectors; in run()
6774 sector_div(here_old, chunk_sectors * (old_disks-max_degraded)); in run()
6785 if (abs(min_offset_diff) >= mddev->chunk_sectors && in run()
6796 ? (here_new * chunk_sectors + min_offset_diff <= in run()
6797 here_old * chunk_sectors) in run()
6798 : (here_new * chunk_sectors >= in run()
6799 here_old * chunk_sectors + (-min_offset_diff))) { in run()
6812 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); in run()
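
The run() hunk at lines 6764-6799 validates a resumed reshape: reshape_position must fall on a full-stripe boundary (chunk_sectors times the data-disk count) under both the old and the new geometry, with max(chunk_sectors, new_chunk_sectors) used so the coarser granularity wins, and the old and new positions are then compared so the reshape can proceed without the two regions overlapping. A sketch of just the boundary part of that check (the min_offset_diff comparison is left out):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Sketch: is reshape_position on a full-stripe boundary for a given
     * geometry?  Mirrors the sector_div(here, chunk_sectors * data_disks)
     * test above, where a non-zero remainder means "not aligned". */
    static bool on_stripe_boundary(sector_t reshape_position,
                                   unsigned int chunk_sectors, int data_disks)
    {
        return reshape_position % ((sector_t)chunk_sectors * data_disks) == 0;
    }

    int main(void)
    {
        unsigned int chunk_sectors = 128;

        /* a position aligned for 4 data disks need not be aligned for 5 */
        printf("%d\n", on_stripe_boundary(3072, chunk_sectors, 4)); /* 1 */
        printf("%d\n", on_stripe_boundary(3072, chunk_sectors, 5)); /* 0 */
        return 0;
    }
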
6901 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); in run()
6963 ((mddev->chunk_sectors << 9) / PAGE_SIZE); in run()
6967 chunk_size = mddev->chunk_sectors << 9; in run()
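
Around lines 6963-6967, run() turns the chunk into bytes (shifting left by 9, i.e. multiplying by 512) to seed the queue's minimum and optimal I/O hints, and into pages to size the read-ahead window at two full data stripes; end_reshape() at line 7537 redoes the same page calculation for the new geometry. A worked sketch of the arithmetic, with the page size assumed to be 4096 bytes:

    #include <stdio.h>

    /* Sketch: derive queue and read-ahead hints from chunk_sectors.
     * One sector is 512 bytes, hence "<< 9". */
    int main(void)
    {
        unsigned int chunk_sectors = 128; /* 64 KiB chunk */
        int data_disks = 4;               /* raid_disks - max_degraded */
        unsigned int page_size = 4096;    /* assumed PAGE_SIZE */

        unsigned int chunk_bytes = chunk_sectors << 9;           /* 65536 */
        unsigned int io_opt = chunk_bytes * data_disks;          /* 262144 */
        unsigned int stripe_pages =
                data_disks * ((chunk_sectors << 9) / page_size); /* 64 */

        printf("io_min=%u io_opt=%u readahead>=%u pages\n",
               chunk_bytes, io_opt, 2 * stripe_pages);
        return 0;
    }
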
7066 conf->chunk_sectors / 2, mddev->layout); in status()
7287 sectors &= ~((sector_t)conf->chunk_sectors - 1); in raid5_resize()
7321 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 in check_stripe_cache()
7327 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) in check_stripe_cache()
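
check_stripe_cache() at lines 7321-7327 refuses to start a reshape unless the stripe cache can hold four chunks' worth of stripe heads for both the old and the new chunk size; each stripe head covers STRIPE_SIZE bytes per device, so one chunk needs (chunk_sectors << 9) / STRIPE_SIZE of them. A sketch of the required count (STRIPE_SIZE assumed to be one 4096-byte page, as it traditionally is):

    #include <stdio.h>

    #define STRIPE_SIZE 4096U

    /* Sketch: stripe_heads needed before a reshape may start, following
     * check_stripe_cache(): four chunks' worth at the larger of the old
     * and new chunk sizes. */
    static unsigned int stripes_needed(unsigned int chunk_sectors,
                                       unsigned int new_chunk_sectors)
    {
        unsigned int chunk = chunk_sectors > new_chunk_sectors
                           ? chunk_sectors : new_chunk_sectors;

        return ((chunk << 9) / STRIPE_SIZE) * 4;
    }

    int main(void)
    {
        /* growing the chunk from 128 to 256 sectors (64 KiB -> 128 KiB) */
        printf("need %u stripe_heads in the cache\n",
               stripes_needed(128, 256));
        return 0;
    }
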
7342 mddev->new_chunk_sectors == mddev->chunk_sectors) in check_reshape()
7362 if (mddev->new_chunk_sectors > mddev->chunk_sectors || in check_reshape()
7368 mddev->chunk_sectors) in check_reshape()
7419 conf->prev_chunk_sectors = conf->chunk_sectors; in raid5_start_reshape()
7420 conf->chunk_sectors = mddev->new_chunk_sectors; in raid5_start_reshape()
7495 conf->chunk_sectors = conf->prev_chunk_sectors; in raid5_start_reshape()
7537 int stripe = data_disks * ((conf->chunk_sectors << 9) in end_reshape()
7575 mddev->chunk_sectors = conf->chunk_sectors; in raid5_finish_reshape()
7636 mddev->new_chunk_sectors = mddev->chunk_sectors; in raid45_takeover_raid0()
7736 conf->chunk_sectors = new_chunk ; in raid5_check_reshape()
7737 mddev->chunk_sectors = new_chunk; in raid5_check_reshape()