Lines matching refs: geo (identifier cross-reference into drivers/md/raid10.c: source line number, matching line, enclosing function)
552 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio) in __raid10_find_phys() argument
562 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; in __raid10_find_phys()
563 last_far_set_start *= geo->far_set_size; in __raid10_find_phys()
565 last_far_set_size = geo->far_set_size; in __raid10_find_phys()
566 last_far_set_size += (geo->raid_disks % geo->far_set_size); in __raid10_find_phys()
569 chunk = r10bio->sector >> geo->chunk_shift; in __raid10_find_phys()
570 sector = r10bio->sector & geo->chunk_mask; in __raid10_find_phys()
572 chunk *= geo->near_copies; in __raid10_find_phys()
574 dev = sector_div(stripe, geo->raid_disks); in __raid10_find_phys()
575 if (geo->far_offset) in __raid10_find_phys()
576 stripe *= geo->far_copies; in __raid10_find_phys()
578 sector += stripe << geo->chunk_shift; in __raid10_find_phys()
581 for (n = 0; n < geo->near_copies; n++) { in __raid10_find_phys()
589 for (f = 1; f < geo->far_copies; f++) { in __raid10_find_phys()
590 set = d / geo->far_set_size; in __raid10_find_phys()
591 d += geo->near_copies; in __raid10_find_phys()
593 if ((geo->raid_disks % geo->far_set_size) && in __raid10_find_phys()
599 d %= geo->far_set_size; in __raid10_find_phys()
600 d += geo->far_set_size * set; in __raid10_find_phys()
602 s += geo->stride; in __raid10_find_phys()
608 if (dev >= geo->raid_disks) { in __raid10_find_phys()
610 sector += (geo->chunk_mask + 1); in __raid10_find_phys()
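
__raid10_find_phys() maps one logical array sector onto a (device, physical sector) pair for every near and far copy, which is what fills the devs[] slots of an r10bio. A minimal userspace sketch of the near-copy part only, assuming far_copies == 1 and far_offset == 0 (the struct and helper names below are invented for illustration, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>

    struct geom_sketch {
        int raid_disks;
        int near_copies;
        uint64_t chunk_mask;    /* chunk size in sectors, minus 1 */
        int chunk_shift;        /* log2(chunk size in sectors) */
    };

    /* Print where every near copy of one logical sector lands. */
    static void find_phys_near(const struct geom_sketch *geo, uint64_t lsector)
    {
        uint64_t chunk   = lsector >> geo->chunk_shift;  /* logical chunk */
        uint64_t offset  = lsector & geo->chunk_mask;    /* offset in chunk */
        uint64_t slot    = chunk * geo->near_copies;     /* slot of copy 0 */
        uint64_t stripe  = slot / geo->raid_disks;
        int dev          = slot % geo->raid_disks;
        uint64_t psector = (stripe << geo->chunk_shift) + offset;

        for (int n = 0; n < geo->near_copies; n++) {
            printf("copy %d -> dev %d, sector %llu\n",
                   n, dev, (unsigned long long)psector);
            if (++dev >= geo->raid_disks) {   /* wrapped: next stripe */
                dev = 0;
                psector += geo->chunk_mask + 1;
            }
        }
    }

    int main(void)
    {
        /* 4 disks, 2 near copies, 512 KiB chunks (1024 sectors) */
        struct geom_sketch geo = { 4, 2, 1023, 10 };
        find_phys_near(&geo, 123456);
        return 0;
    }

The far-copy loop in the real function (lines 589-602 above) then repeats each of those placements for the remaining far copies, moving one stride further into the device per copy, with the far_set bookkeeping covering arrays whose disk count is not a multiple of far_set_size.
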
617 struct geom *geo = &conf->geo; in raid10_find_phys() local
623 geo = &conf->prev; in raid10_find_phys()
627 __raid10_find_phys(geo, r10bio); in raid10_find_phys()
636 struct geom *geo = &conf->geo; in raid10_find_virt() local
637 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size; in raid10_find_virt()
638 int far_set_size = geo->far_set_size; in raid10_find_virt()
641 if (geo->raid_disks % geo->far_set_size) { in raid10_find_virt()
642 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1; in raid10_find_virt()
643 last_far_set_start *= geo->far_set_size; in raid10_find_virt()
646 far_set_size = geo->far_set_size; in raid10_find_virt()
647 far_set_size += (geo->raid_disks % geo->far_set_size); in raid10_find_virt()
652 offset = sector & geo->chunk_mask; in raid10_find_virt()
653 if (geo->far_offset) { in raid10_find_virt()
655 chunk = sector >> geo->chunk_shift; in raid10_find_virt()
656 fc = sector_div(chunk, geo->far_copies); in raid10_find_virt()
657 dev -= fc * geo->near_copies; in raid10_find_virt()
661 while (sector >= geo->stride) { in raid10_find_virt()
662 sector -= geo->stride; in raid10_find_virt()
663 if (dev < (geo->near_copies + far_set_start)) in raid10_find_virt()
664 dev += far_set_size - geo->near_copies; in raid10_find_virt()
666 dev -= geo->near_copies; in raid10_find_virt()
668 chunk = sector >> geo->chunk_shift; in raid10_find_virt()
670 vchunk = chunk * geo->raid_disks + dev; in raid10_find_virt()
671 sector_div(vchunk, geo->near_copies); in raid10_find_virt()
672 return (vchunk << geo->chunk_shift) + offset; in raid10_find_virt()
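
raid10_find_virt() is the inverse: given a member device index and a physical sector on it, recover the array (virtual) sector, with extra steps to undo the far and far-offset placement first. Continuing the sketch above for the near-only case (same invented struct; illustrative only):

    /* Map (physical sector, device) back to the array sector, near copies only. */
    static uint64_t find_virt_near(const struct geom_sketch *geo,
                                   uint64_t psector, int dev)
    {
        uint64_t offset = psector & geo->chunk_mask;
        uint64_t chunk  = psector >> geo->chunk_shift;
        uint64_t vchunk = (chunk * geo->raid_disks + dev) / geo->near_copies;

        return (vchunk << geo->chunk_shift) + offset;
    }

Feeding the placements printed by find_phys_near() back through this reproduces the original logical sector, which is the translation the driver needs whenever it only knows a position on one member device.
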
706 struct geom *geo = &conf->geo; in read_balance() local
785 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending)) in read_balance()
789 if (geo->far_copies > 1) in read_balance()
834 (i < conf->geo.raid_disks || i < conf->prev.raid_disks) in raid10_congested()
1448 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); in make_request()
1468 && (conf->geo.near_copies < conf->geo.raid_disks in make_request()
1492 if (conf->geo.near_copies < conf->geo.raid_disks) in status()
1494 if (conf->geo.near_copies > 1) in status()
1495 seq_printf(seq, " %d near-copies", conf->geo.near_copies); in status()
1496 if (conf->geo.far_copies > 1) { in status()
1497 if (conf->geo.far_offset) in status()
1498 seq_printf(seq, " %d offset-copies", conf->geo.far_copies); in status()
1500 seq_printf(seq, " %d far-copies", conf->geo.far_copies); in status()
1501 if (conf->geo.far_set_size != conf->geo.raid_disks) in status()
1502 seq_printf(seq, " %d devices per set", conf->geo.far_set_size); in status()
1504 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, in status()
1505 conf->geo.raid_disks - mddev->degraded); in status()
1506 for (i = 0; i < conf->geo.raid_disks; i++) in status()
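
Together, the status() lines above emit the raid10-specific part of the /proc/mdstat personality line: chunk size, near/far/offset copy counts, devices per set when that differs from raid_disks, and the [in-sync/total] [U_] map. An illustrative excerpt for a healthy 4-disk near-2 array (device names and block counts are invented):

    md0 : active raid10 sdd1[3] sdc1[2] sdb1[1] sda1[0]
          1953257472 blocks super 1.2 512K chunks 2 near-copies [4/4] [UUUU]
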
1527 disks = conf->geo.raid_disks; in _enough()
1528 ncopies = conf->geo.near_copies; in _enough()
1601 mdname(mddev), conf->geo.raid_disks - mddev->degraded); in error()
1614 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, in print_conf()
1615 conf->geo.raid_disks); in print_conf()
1617 for (i = 0; i < conf->geo.raid_disks; i++) { in print_conf()
1649 for (i = 0; i < conf->geo.raid_disks; i++) { in raid10_spare_active()
1691 int last = conf->geo.raid_disks - 1; in raid10_add_disk()
1779 number < conf->geo.raid_disks && in raid10_remove_disk()
2766 for (i = 0; i < conf->geo.raid_disks; i++) in init_resync()
2820 sector_t chunk_mask = conf->geo.chunk_mask; in sync_request()
2866 else for (i = 0; i < conf->geo.raid_disks; i++) { in sync_request()
2880 for (i = 0; i < conf->geo.raid_disks; i++) in sync_request()
2897 if (chunks_skipped >= conf->geo.raid_disks) { in sync_request()
2911 if (conf->geo.near_copies < conf->geo.raid_disks && in sync_request()
2936 for (i = 0 ; i < conf->geo.raid_disks; i++) { in sync_request()
2997 for (j = 0; j < conf->geo.raid_disks; j++) in sync_request()
3340 raid_disks = min(conf->geo.raid_disks, in raid10_size()
3345 size = sectors >> conf->geo.chunk_shift; in raid10_size()
3346 sector_div(size, conf->geo.far_copies); in raid10_size()
3348 sector_div(size, conf->geo.near_copies); in raid10_size()
3350 return size << conf->geo.chunk_shift; in raid10_size()
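
raid10_size() converts per-device capacity into exported array capacity by working in whole chunks: divide out the far copies, scale by the member count, divide out the near copies. A quick sketch of the same arithmetic under assumed numbers (4 members of 1000 chunks each, near=2, far=1, giving 2000 usable chunks):

    /* Same shape of arithmetic as raid10_size(); sizes are in chunks. */
    static uint64_t array_chunks(uint64_t dev_chunks, int disks, int nc, int fc)
    {
        return dev_chunks / fc * disks / nc;   /* 1000 / 1 * 4 / 2 = 2000 */
    }
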
3360 size = size >> conf->geo.chunk_shift; in calc_sectors()
3361 sector_div(size, conf->geo.far_copies); in calc_sectors()
3362 size = size * conf->geo.raid_disks; in calc_sectors()
3363 sector_div(size, conf->geo.near_copies); in calc_sectors()
3371 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks); in calc_sectors()
3373 conf->dev_sectors = size << conf->geo.chunk_shift; in calc_sectors()
3375 if (conf->geo.far_offset) in calc_sectors()
3376 conf->geo.stride = 1 << conf->geo.chunk_shift; in calc_sectors()
3378 sector_div(size, conf->geo.far_copies); in calc_sectors()
3379 conf->geo.stride = size << conf->geo.chunk_shift; in calc_sectors()
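
calc_sectors() goes the other way for a requested array size: it rounds to whole chunks, works out the used size of each device (conf->dev_sectors), and derives conf->geo.stride, the distance between successive far copies on one device. With far_offset the copies sit one chunk apart; otherwise the device splits into far_copies equal sections and the stride is the section length. A sketch of that last rule (helper name invented; sizes in sectors, dev_sectors assumed chunk-aligned):

    /* Stride between far copies, mirroring the tail of calc_sectors(). */
    static uint64_t stride_sectors(uint64_t dev_sectors, int chunk_shift,
                                   int far_copies, int far_offset)
    {
        if (far_offset)
            return 1ULL << chunk_shift;          /* copies one chunk apart */
        return (dev_sectors >> chunk_shift) / far_copies << chunk_shift;
    }

So a 1000-chunk device with far_copies=2 and far_offset=0 would get a 500-chunk stride.
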
3384 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) in setup_geo() argument
3415 geo->raid_disks = disks; in setup_geo()
3416 geo->near_copies = nc; in setup_geo()
3417 geo->far_copies = fc; in setup_geo()
3418 geo->far_offset = fo; in setup_geo()
3421 geo->far_set_size = disks; in setup_geo()
3425 geo->far_set_size = disks/fc; in setup_geo()
3426 WARN(geo->far_set_size < fc, in setup_geo()
3430 geo->far_set_size = fc * nc; in setup_geo()
3435 geo->chunk_mask = chunk - 1; in setup_geo()
3436 geo->chunk_shift = ffz(~chunk); in setup_geo()
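
setup_geo() derives all of these geometry fields from the md layout word and chunk size. The commonly documented encoding, assumed here, is: low byte = near copies, next byte = far copies, bit 16 = far-offset, with the bits above that selecting how far_set_size is computed (lines 3421-3430 above). An illustrative decode of the usual near-2 layout value:

    #include <stdio.h>

    int main(void)
    {
        int layout = 0x102;              /* typical "near=2" raid10 layout */
        int nc = layout & 255;           /* near copies  -> 2 */
        int fc = (layout >> 8) & 255;    /* far copies   -> 1 */
        int fo = layout & (1 << 16);     /* far-offset   -> 0 */

        printf("near=%d far=%d offset=%s\n", nc, fc, fo ? "yes" : "no");
        return 0;
    }

setup_geo() returns nc * fc, the total number of copies, which callers such as setup_conf() and raid10_check_reshape() (line 3879) compare against conf->copies.
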
3444 struct geom geo; in setup_conf() local
3447 copies = setup_geo(&geo, mddev, geo_new); in setup_conf()
3478 conf->geo = geo; in setup_conf()
3487 conf->prev = conf->geo; in setup_conf()
3559 if (conf->geo.raid_disks % conf->geo.near_copies) in run()
3560 blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); in run()
3563 (conf->geo.raid_disks / conf->geo.near_copies)); in run()
3573 if (disk_idx >= conf->geo.raid_disks && in run()
3623 if (conf->geo.far_copies != 1 && in run()
3624 conf->geo.far_offset == 0) in run()
3633 i < conf->geo.raid_disks in run()
3663 mdname(mddev), conf->geo.raid_disks - mddev->degraded, in run()
3664 conf->geo.raid_disks); in run()
3674 int stripe = conf->geo.raid_disks * in run()
3681 stripe /= conf->geo.near_copies; in run()
3694 after_length = ((1 << conf->geo.chunk_shift) * in run()
3695 conf->geo.far_copies); in run()
3771 if (conf->geo.far_copies > 1 && !conf->geo.far_offset) in raid10_resize()
3874 struct geom geo; in raid10_check_reshape() local
3876 if (conf->geo.far_copies != 1 && !conf->geo.far_offset) in raid10_check_reshape()
3879 if (setup_geo(&geo, mddev, geo_start) != conf->copies) in raid10_check_reshape()
3882 if (geo.far_copies > 1 && !geo.far_offset) in raid10_check_reshape()
3886 if (mddev->array_sectors & geo.chunk_mask) in raid10_check_reshape()
3941 if (conf->geo.raid_disks == conf->prev.raid_disks) in calc_degraded()
3945 for (i = 0; i < conf->geo.raid_disks; i++) { in calc_degraded()
3955 if (conf->geo.raid_disks <= conf->prev.raid_disks) in calc_degraded()
3994 after_length = ((1 << conf->geo.chunk_shift) * in raid10_start_reshape()
3995 conf->geo.far_copies); in raid10_start_reshape()
4030 setup_geo(&conf->geo, mddev, geo_start); in raid10_start_reshape()
4050 conf->geo.raid_disks), in raid10_start_reshape()
4082 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4106 conf->geo = conf->prev; in raid10_start_reshape()
4107 mddev->raid_disks = conf->geo.raid_disks; in raid10_start_reshape()
4124 static sector_t last_dev_address(sector_t s, struct geom *geo) in last_dev_address() argument
4126 s = (s | geo->chunk_mask) + 1; in last_dev_address()
4127 s >>= geo->chunk_shift; in last_dev_address()
4128 s *= geo->near_copies; in last_dev_address()
4129 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks); in last_dev_address()
4130 s *= geo->far_copies; in last_dev_address()
4131 s <<= geo->chunk_shift; in last_dev_address()
4139 static sector_t first_dev_address(sector_t s, struct geom *geo) in first_dev_address() argument
4141 s >>= geo->chunk_shift; in first_dev_address()
4142 s *= geo->near_copies; in first_dev_address()
4143 sector_div(s, geo->raid_disks); in first_dev_address()
4144 s *= geo->far_copies; in first_dev_address()
4145 s <<= geo->chunk_shift; in first_dev_address()
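
last_dev_address() and first_dev_address() bracket the per-device region that a range of array addresses can touch, which reshape_request() uses to keep the area being read from and the area being written to from overlapping. The first rounds everything up (chunk boundary, then ceiling division over raid_disks); the second rounds everything down. Worked under an assumed geometry of 4 disks, near=1, far=1 and 1024-sector chunks: for array sector 10000, last_dev_address() rounds up to 10240 sectors = 10 chunks, then ceil(10 / 4) = 3 chunks, i.e. 3072 sectors per device, while first_dev_address() truncates to 9 chunks, then floor(9 / 4) = 2 chunks, i.e. 2048 sectors per device.
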
4227 &conf->geo); in reshape_request()
4239 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask in reshape_request()
4247 next = last_dev_address(conf->reshape_progress, &conf->geo); in reshape_request()
4261 last = sector_nr | (conf->geo.chunk_mask in reshape_request()
4330 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4463 conf->prev = conf->geo; in end_reshape()
4474 int stripe = conf->geo.raid_disks * in end_reshape()
4476 stripe /= conf->geo.near_copies; in end_reshape()
4600 for (d = conf->geo.raid_disks ; in raid10_finish_reshape()
4601 d < conf->geo.raid_disks - mddev->delta_disks; in raid10_finish_reshape()
4612 mddev->chunk_sectors = 1 << conf->geo.chunk_shift; in raid10_finish_reshape()