mddev 240 drivers/md/dm-raid.c struct mddev md;
mddev 256 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 258 drivers/md/dm-raid.c l->new_level = mddev->new_level;
mddev 259 drivers/md/dm-raid.c l->new_layout = mddev->new_layout;
mddev 260 drivers/md/dm-raid.c l->new_chunk_sectors = mddev->new_chunk_sectors;
mddev 265 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 267 drivers/md/dm-raid.c mddev->new_level = l->new_level;
mddev 268 drivers/md/dm-raid.c mddev->new_layout = l->new_layout;
mddev 269 drivers/md/dm-raid.c mddev->new_chunk_sectors = l->new_chunk_sectors;
mddev 680 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 687 drivers/md/dm-raid.c rdev_for_each(rdev, mddev)
mddev 689 drivers/md/dm-raid.c rdev->sectors = mddev->dev_sectors;
mddev 709 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 711 drivers/md/dm-raid.c mddev->new_level = mddev->level;
mddev 712 drivers/md/dm-raid.c mddev->new_layout = mddev->layout;
mddev 713 drivers/md/dm-raid.c mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev 722 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 724 drivers/md/dm-raid.c mddev->level = mddev->new_level;
mddev 725 drivers/md/dm-raid.c mddev->layout = mddev->new_layout;
mddev 726 drivers/md/dm-raid.c mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev 727 drivers/md/dm-raid.c mddev->raid_disks = rs->raid_disks;
mddev 728 drivers/md/dm-raid.c mddev->delta_disks = 0;
mddev 841 drivers/md/dm-raid.c rs->dev[i].rdev.mddev = &rs->md;
mddev 1259 drivers/md/dm-raid.c jdev->mddev = &rs->md;
mddev 1533 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 1534 drivers/md/dm-raid.c uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
mddev 1548 drivers/md/dm-raid.c conf = mddev->private;
mddev 1556 drivers/md/dm-raid.c r = raid5_set_cache_size(mddev, nr_stripes);
mddev 1622 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 1627 drivers/md/dm-raid.c delta_disks = mddev->delta_disks;
mddev 1659 drivers/md/dm-raid.c rdev_for_each(rdev, mddev)
mddev 1663 drivers/md/dm-raid.c mddev->array_sectors = array_sectors;
mddev 1664 drivers/md/dm-raid.c mddev->dev_sectors = dev_sectors;
mddev 1738 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 1751 drivers/md/dm-raid.c switch (mddev->level) {
mddev 1754 drivers/md/dm-raid.c if ((mddev->new_level == 1 || mddev->new_level == 5) &&
mddev 1755 drivers/md/dm-raid.c mddev->raid_disks == 1)
mddev 1759 drivers/md/dm-raid.c if (mddev->new_level == 10 &&
mddev 1760 drivers/md/dm-raid.c !(rs->raid_disks % mddev->raid_disks))
mddev 1764 drivers/md/dm-raid.c if (__within_range(mddev->new_level, 4, 6) &&
mddev 1765 drivers/md/dm-raid.c mddev->new_layout == ALGORITHM_PARITY_N &&
mddev 1766 drivers/md/dm-raid.c mddev->raid_disks > 1)
mddev 1773 drivers/md/dm-raid.c if (__is_raid10_offset(mddev->layout))
mddev 1776 drivers/md/dm-raid.c near_copies = __raid10_near_copies(mddev->layout);
mddev 1779 drivers/md/dm-raid.c if (mddev->new_level == 0) {
mddev 1782 drivers/md/dm-raid.c !(mddev->raid_disks % near_copies)) {
mddev 1783 drivers/md/dm-raid.c mddev->raid_disks /= near_copies;
mddev 1784 drivers/md/dm-raid.c mddev->delta_disks = mddev->raid_disks;
mddev 1790 drivers/md/dm-raid.c __raid10_far_copies(mddev->layout) > 1)
mddev 1797 drivers/md/dm-raid.c if (mddev->new_level == 1 &&
mddev 1798 drivers/md/dm-raid.c max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
mddev 1802 drivers/md/dm-raid.c if (__within_range(mddev->new_level, 4, 5) &&
mddev 1803 drivers/md/dm-raid.c mddev->raid_disks == 2)
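The dm-raid.c entries above all revolve around one pattern: struct raid_set embeds a struct mddev (line 240), callers pass &rs->md around, and super_sync() later recovers the container with container_of() (line 2102 below). A minimal userspace sketch of that embed-and-recover round trip; the *_like struct names are hypothetical stand-ins:

    #include <stddef.h>
    #include <stdio.h>

    /* Kernel-style container_of built on offsetof. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mddev_like { int level; };       /* stand-in for struct mddev */
    struct raid_set_like {                  /* stand-in for struct raid_set */
            int raid_disks;
            struct mddev_like md;           /* embedded, as at dm-raid.c:240 */
    };

    /* Callbacks receive only the embedded member... */
    static void show(struct mddev_like *mddev)
    {
            /* ...and recover the owning set, as super_sync() does. */
            struct raid_set_like *rs =
                    container_of(mddev, struct raid_set_like, md);
            printf("raid_disks=%d level=%d\n", rs->raid_disks, mddev->level);
    }

    int main(void)
    {
            struct raid_set_like rs = { .raid_disks = 4, .md = { .level = 5 } };
            show(&rs.md);   /* pass &rs->md, the idiom seen throughout dm-raid.c */
            return 0;
    }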
mddev 1809 drivers/md/dm-raid.c if (__within_range(mddev->new_level, 4, 5) &&
mddev 1810 drivers/md/dm-raid.c mddev->raid_disks == 2) {
mddev 1811 drivers/md/dm-raid.c mddev->degraded = 1;
mddev 1816 drivers/md/dm-raid.c if (mddev->new_level == 0 &&
mddev 1817 drivers/md/dm-raid.c mddev->raid_disks == 1)
mddev 1821 drivers/md/dm-raid.c if (mddev->new_level == 10)
mddev 1827 drivers/md/dm-raid.c if (mddev->new_level == 0)
mddev 1831 drivers/md/dm-raid.c if ((mddev->new_level == 1 || mddev->new_level == 5) &&
mddev 1832 drivers/md/dm-raid.c mddev->raid_disks == 2)
mddev 1836 drivers/md/dm-raid.c if (__within_range(mddev->new_level, 5, 6) &&
mddev 1837 drivers/md/dm-raid.c mddev->layout == ALGORITHM_PARITY_N)
mddev 1843 drivers/md/dm-raid.c if (mddev->new_level == 0 &&
mddev 1844 drivers/md/dm-raid.c mddev->layout == ALGORITHM_PARITY_N)
mddev 1848 drivers/md/dm-raid.c if (mddev->new_level == 4 &&
mddev 1849 drivers/md/dm-raid.c mddev->layout == ALGORITHM_PARITY_N)
mddev 1853 drivers/md/dm-raid.c if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
mddev 1854 drivers/md/dm-raid.c mddev->raid_disks == 2)
mddev 1858 drivers/md/dm-raid.c if (mddev->new_level == 6 &&
mddev 1859 drivers/md/dm-raid.c ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
mddev 1860 drivers/md/dm-raid.c __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
mddev 1866 drivers/md/dm-raid.c if (mddev->new_level == 0 &&
mddev 1867 drivers/md/dm-raid.c mddev->layout == ALGORITHM_PARITY_N)
mddev 1871 drivers/md/dm-raid.c if (mddev->new_level == 4 &&
mddev 1872 drivers/md/dm-raid.c mddev->layout == ALGORITHM_PARITY_N)
mddev 1876 drivers/md/dm-raid.c if (mddev->new_level == 5 &&
mddev 1877 drivers/md/dm-raid.c ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
mddev 1878 drivers/md/dm-raid.c __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
mddev 1899 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 1907 drivers/md/dm-raid.c change = mddev->new_layout != mddev->layout ||
mddev 1908 drivers/md/dm-raid.c mddev->new_chunk_sectors != mddev->chunk_sectors ||
mddev 1917 drivers/md/dm-raid.c mddev->raid_disks != rs->raid_disks;
mddev 1922 drivers/md/dm-raid.c !__is_raid10_far(mddev->new_layout) &&
mddev 2029 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 2031 drivers/md/dm-raid.c if (!mddev->pers || !mddev->pers->check_reshape)
mddev 2033 drivers/md/dm-raid.c else if (mddev->degraded)
mddev 2059 drivers/md/dm-raid.c md_error(rdev->mddev, rdev);
mddev 2096 drivers/md/dm-raid.c static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
mddev 2102 drivers/md/dm-raid.c struct raid_set *rs = container_of(mddev, struct raid_set, md);
mddev 2126 drivers/md/dm-raid.c sb->num_devices = cpu_to_le32(mddev->raid_disks);
mddev 2129 drivers/md/dm-raid.c sb->events = cpu_to_le64(mddev->events);
mddev 2132 drivers/md/dm-raid.c sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
mddev 2134 drivers/md/dm-raid.c sb->level = cpu_to_le32(mddev->level);
mddev 2135 drivers/md/dm-raid.c sb->layout = cpu_to_le32(mddev->layout);
mddev 2136 drivers/md/dm-raid.c sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
mddev 2143 drivers/md/dm-raid.c sb->new_level = cpu_to_le32(mddev->new_level);
mddev 2144 drivers/md/dm-raid.c sb->new_layout = cpu_to_le32(mddev->new_layout);
mddev 2145 drivers/md/dm-raid.c sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
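Lines 2126-2145 above show super_sync() packing mddev state into the on-disk superblock with cpu_to_le32()/cpu_to_le64(); the matching le32_to_cpu()/le64_to_cpu() reads follow just below (lines 2245-2260). A self-contained sketch of that fixed little-endian round trip; put_le32()/get_le32() are stand-ins for the kernel helpers, shown only to make the byte layout explicit:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for cpu_to_le32()/le32_to_cpu(): encode to and decode from
     * little-endian byte order regardless of host endianness. */
    static void put_le32(uint8_t *p, uint32_t v)
    {
            p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
    }

    static uint32_t get_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    struct sb_like {                /* a few fields, dm_raid_superblock-style */
            uint8_t level[4];
            uint8_t layout[4];
            uint8_t stripe_sectors[4];
    };

    int main(void)
    {
            struct sb_like sb;

            /* super_sync() direction: in-core state -> superblock */
            put_le32(sb.level, 5);
            put_le32(sb.layout, 2);
            put_le32(sb.stripe_sectors, 128);

            /* super_init_validation() direction: superblock -> in-core */
            printf("level=%u layout=%u chunk=%u\n", get_le32(sb.level),
                   get_le32(sb.layout), get_le32(sb.stripe_sectors));
            return 0;
    }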
mddev 2147 drivers/md/dm-raid.c sb->delta_disks = cpu_to_le32(mddev->delta_disks);
mddev 2150 drivers/md/dm-raid.c sb->reshape_position = cpu_to_le64(mddev->reshape_position);
mddev 2155 drivers/md/dm-raid.c if (mddev->delta_disks < 0 || mddev->reshape_backwards)
mddev 2162 drivers/md/dm-raid.c sb->array_sectors = cpu_to_le64(mddev->array_sectors);
mddev 2200 drivers/md/dm-raid.c super_sync(rdev->mddev, rdev);
mddev 2206 drivers/md/dm-raid.c set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
mddev 2227 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 2241 drivers/md/dm-raid.c mddev->events = events_sb ? : 1;
mddev 2243 drivers/md/dm-raid.c mddev->reshape_position = MaxSector;
mddev 2245 drivers/md/dm-raid.c mddev->raid_disks = le32_to_cpu(sb->num_devices);
mddev 2246 drivers/md/dm-raid.c mddev->level = le32_to_cpu(sb->level);
mddev 2247 drivers/md/dm-raid.c mddev->layout = le32_to_cpu(sb->layout);
mddev 2248 drivers/md/dm-raid.c mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
mddev 2256 drivers/md/dm-raid.c mddev->new_level = le32_to_cpu(sb->new_level);
mddev 2257 drivers/md/dm-raid.c mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev 2258 drivers/md/dm-raid.c mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
mddev 2259 drivers/md/dm-raid.c mddev->delta_disks = le32_to_cpu(sb->delta_disks);
mddev 2260 drivers/md/dm-raid.c mddev->array_sectors = le64_to_cpu(sb->array_sectors);
mddev 2269 drivers/md/dm-raid.c if (mddev->delta_disks < 0 ||
mddev 2270 drivers/md/dm-raid.c (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
mddev 2271 drivers/md/dm-raid.c mddev->reshape_backwards = 1;
mddev 2273 drivers/md/dm-raid.c mddev->reshape_backwards = 0;
mddev 2275 drivers/md/dm-raid.c mddev->reshape_position = le64_to_cpu(sb->reshape_position);
mddev 2276 drivers/md/dm-raid.c rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
mddev 2283 drivers/md/dm-raid.c struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
mddev 2284 drivers/md/dm-raid.c struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
mddev 2295 drivers/md/dm-raid.c if (mddev->layout != mddev->new_layout) {
mddev 2301 drivers/md/dm-raid.c le32_to_cpu(sb->layout), mddev->new_layout);
mddev 2303 drivers/md/dm-raid.c if (mddev->chunk_sectors != mddev->new_chunk_sectors)
mddev 2305 drivers/md/dm-raid.c mddev->chunk_sectors, mddev->new_chunk_sectors);
mddev 2308 drivers/md/dm-raid.c mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
mddev 2311 drivers/md/dm-raid.c raid10_md_layout_to_format(mddev->layout),
mddev 2312 drivers/md/dm-raid.c raid10_md_layout_to_copies(mddev->layout));
mddev 2314 drivers/md/dm-raid.c raid10_md_layout_to_format(mddev->new_layout),
mddev 2315 drivers/md/dm-raid.c raid10_md_layout_to_copies(mddev->new_layout));
mddev 2324 drivers/md/dm-raid.c mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
mddev 2342 drivers/md/dm-raid.c rdev_for_each(r, mddev) {
mddev 2367 drivers/md/dm-raid.c set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
mddev 2386 drivers/md/dm-raid.c (unsigned long long) mddev->recovery_cp);
mddev 2390 drivers/md/dm-raid.c (unsigned long long) mddev->reshape_position);
mddev 2400 drivers/md/dm-raid.c rdev_for_each(r, mddev) {
mddev 2417 drivers/md/dm-raid.c if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
mddev 2418 drivers/md/dm-raid.c if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
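The __raid10_near_copies()/__raid10_far_copies()/__is_raid10_offset() helpers referenced above decode copy counts out of the single mddev->layout integer. A sketch under the encoding I believe dm-raid.c and raid10.c use (near copies in bits 0-7, far copies in bits 8-15, "offset" mode at bit 16); treat the exact bit positions as an assumption to verify against the tree you target:

    #include <stdio.h>

    #define RAID10_OFFSET   (1 << 16)   /* assumed flag bit for offset mode */

    static int raid10_near_copies(int layout) { return layout & 0xFF; }
    static int raid10_far_copies(int layout)  { return (layout >> 8) & 0xFF; }
    static int raid10_is_offset(int layout)   { return !!(layout & RAID10_OFFSET); }

    int main(void)
    {
            int layout = 0x102;     /* 2 near copies, 1 far copy, no offset */
            int raid_disks = 4;

            /* The divisibility rule checked at dm-raid.c:2418: a "near"
             * layout needs raid_disks to be a multiple of the copy count. */
            if (raid_disks % raid10_near_copies(layout))
                    fprintf(stderr, "bad near layout\n");

            printf("near=%d far=%d offset=%d\n", raid10_near_copies(layout),
                   raid10_far_copies(layout), raid10_is_offset(layout));
            return 0;
    }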
mddev 2451 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 2463 drivers/md/dm-raid.c if (!mddev->events && super_init_validation(rs, rdev))
mddev 2478 drivers/md/dm-raid.c mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
mddev 2479 drivers/md/dm-raid.c mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
mddev 2524 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 2527 drivers/md/dm-raid.c rdev_for_each(rdev, mddev) {
mddev 2600 drivers/md/dm-raid.c rdev_for_each(rdev, mddev)
mddev 2727 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 2729 drivers/md/dm-raid.c unsigned int d = mddev->raid_disks = rs->raid_disks;
mddev 2738 drivers/md/dm-raid.c mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
mddev 2742 drivers/md/dm-raid.c mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
mddev 2749 drivers/md/dm-raid.c clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
mddev 2750 drivers/md/dm-raid.c mddev->recovery_cp = MaxSector;
mddev 2758 drivers/md/dm-raid.c mddev->recovery_cp = rdev->recovery_offset = 0;
mddev 2760 drivers/md/dm-raid.c set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
mddev 2773 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 2776 drivers/md/dm-raid.c if (rs->raid_disks != mddev->raid_disks &&
mddev 2777 drivers/md/dm-raid.c __is_raid10_near(mddev->layout) &&
mddev 2779 drivers/md/dm-raid.c rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
mddev 2793 drivers/md/dm-raid.c mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
mddev 2795 drivers/md/dm-raid.c mddev->new_layout = mddev->layout;
mddev 2806 drivers/md/dm-raid.c mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
mddev 2810 drivers/md/dm-raid.c mddev->raid_disks = rs->raid_disks;
mddev 2821 drivers/md/dm-raid.c } else if (mddev->raid_disks < rs->raid_disks)
mddev 2857 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 2860 drivers/md/dm-raid.c mddev->delta_disks = rs->delta_disks;
mddev 2861 drivers/md/dm-raid.c cur_raid_devs = mddev->raid_disks;
mddev 2864 drivers/md/dm-raid.c if (mddev->delta_disks &&
mddev 2865 drivers/md/dm-raid.c mddev->layout != mddev->new_layout) {
mddev 2867 drivers/md/dm-raid.c mddev->new_layout = mddev->layout;
mddev 2906 drivers/md/dm-raid.c rdev->sectors = mddev->dev_sectors;
mddev 2910 drivers/md/dm-raid.c mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
mddev 2915 drivers/md/dm-raid.c mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
mddev 2940 drivers/md/dm-raid.c mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
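Lines 2806, 2910 and 2915 above derive reshape bookkeeping from the sign of delta_disks: degraded is its absolute value, adding disks reshapes forward, and removing disks reshapes backward. A tiny hypothetical condensation of that decision:

    /* Hypothetical condensation of the dm-raid reshape setup shown above. */
    struct reshape_plan {
            int degraded;              /* disks not yet in sync */
            int reshape_backwards;     /* 1 = walk the array from the end */
    };

    static struct reshape_plan plan_reshape(int delta_disks)
    {
            struct reshape_plan p;

            /* mddev->degraded = delta < 0 ? -delta : delta (line 2806) */
            p.degraded = delta_disks < 0 ? -delta_disks : delta_disks;

            /* adding disk(s) -> forward, removing -> backward
             * (lines 2910 and 2915) */
            p.reshape_backwards = delta_disks < 0;
            return p;
    }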
mddev 2947 drivers/md/dm-raid.c if (!mddev->reshape_backwards)
mddev 3314 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 3324 drivers/md/dm-raid.c if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
mddev 3327 drivers/md/dm-raid.c md_handle_request(mddev, bio);
mddev 3351 drivers/md/dm-raid.c static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
mddev 3359 drivers/md/dm-raid.c (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
mddev 3374 drivers/md/dm-raid.c if (mddev->reshape_position != MaxSector)
mddev 3413 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 3423 drivers/md/dm-raid.c state = decipher_sync_action(mddev, recovery);
mddev 3426 drivers/md/dm-raid.c r = mddev->recovery_cp;
mddev 3428 drivers/md/dm-raid.c r = mddev->curr_resync_completed;
mddev 3484 drivers/md/dm-raid.c rdev_for_each(rdev, mddev)
mddev 3506 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 3507 drivers/md/dm-raid.c struct r5conf *conf = mddev->private;
mddev 3521 drivers/md/dm-raid.c rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
mddev 3525 drivers/md/dm-raid.c DMEMIT("%s %d ", rt->name, mddev->raid_disks);
mddev 3532 drivers/md/dm-raid.c mddev->resync_max_sectors : mddev->dev_sectors;
mddev 3534 drivers/md/dm-raid.c resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
mddev 3535 drivers/md/dm-raid.c atomic64_read(&mddev->resync_mismatches) : 0;
mddev 3611 drivers/md/dm-raid.c DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
mddev 3623 drivers/md/dm-raid.c mddev->bitmap_info.daemon_sleep);
mddev 3626 drivers/md/dm-raid.c mddev->sync_speed_min);
mddev 3629 drivers/md/dm-raid.c mddev->sync_speed_max);
mddev 3637 drivers/md/dm-raid.c mddev->bitmap_info.max_write_behind);
mddev 3643 drivers/md/dm-raid.c (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
mddev 3646 drivers/md/dm-raid.c raid10_md_layout_to_copies(mddev->layout));
mddev 3649 drivers/md/dm-raid.c raid10_md_layout_to_format(mddev->layout));
mddev 3652 drivers/md/dm-raid.c max(rs->delta_disks, mddev->delta_disks));
mddev 3673 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 3675 drivers/md/dm-raid.c if (!mddev->pers || !mddev->pers->sync_request)
mddev 3679 drivers/md/dm-raid.c set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 3681 drivers/md/dm-raid.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 3684 drivers/md/dm-raid.c if (mddev->sync_thread) {
mddev 3685 drivers/md/dm-raid.c set_bit(MD_RECOVERY_INTR, &mddev->recovery);
mddev 3686 drivers/md/dm-raid.c md_reap_sync_thread(mddev);
mddev 3688 drivers/md/dm-raid.c } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
mddev 3693 drivers/md/dm-raid.c set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
mddev 3696 drivers/md/dm-raid.c set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
mddev 3697 drivers/md/dm-raid.c set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev 3698 drivers/md/dm-raid.c set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
mddev 3700 drivers/md/dm-raid.c set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev 3701 drivers/md/dm-raid.c set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
mddev 3705 drivers/md/dm-raid.c if (mddev->ro == 2) {
mddev 3709 drivers/md/dm-raid.c mddev->ro = 0;
mddev 3710 drivers/md/dm-raid.c if (!mddev->suspended && mddev->sync_thread)
mddev 3711 drivers/md/dm-raid.c md_wakeup_thread(mddev->sync_thread);
mddev 3713 drivers/md/dm-raid.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
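Lines 3679-3701 above map raid_message() strings onto MD_RECOVERY_* bits in mddev->recovery. A sketch of that dispatch with placeholder bit numbers and non-atomic stand-ins for set_bit()/clear_bit() (the kernel's are atomic bitops):

    #include <string.h>

    /* Placeholder bit numbers; the kernel's MD_RECOVERY_* constants are
     * the real values. */
    enum { FROZEN, RECOVER, CHECK, REQUESTED, SYNC };

    static void set_flag(unsigned long *w, int bit)   { *w |=  1UL << bit; }
    static void clear_flag(unsigned long *w, int bit) { *w &= ~(1UL << bit); }

    /* Mirrors the message dispatch visible at dm-raid.c:3679-3701. */
    static void apply_message(unsigned long *recovery, const char *msg)
    {
            if (!strcmp(msg, "frozen"))
                    set_flag(recovery, FROZEN);
            else
                    clear_flag(recovery, FROZEN);

            if (!strcmp(msg, "recover")) {
                    set_flag(recovery, RECOVER);
            } else if (!strcmp(msg, "check")) {
                    set_flag(recovery, CHECK);      /* check implies... */
                    set_flag(recovery, REQUESTED);  /* ...a requested... */
                    set_flag(recovery, SYNC);       /* ...sync pass */
            } else if (!strcmp(msg, "repair")) {
                    set_flag(recovery, REQUESTED);
                    set_flag(recovery, SYNC);
            }
    }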
mddev 3714 drivers/md/dm-raid.c if (!mddev->suspended && mddev->thread)
mddev 3715 drivers/md/dm-raid.c md_wakeup_thread(mddev->thread);
mddev 3778 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 3782 drivers/md/dm-raid.c if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
mddev 3787 drivers/md/dm-raid.c for (i = 0; i < mddev->raid_disks; i++) {
mddev 3811 drivers/md/dm-raid.c if (mddev->pers->hot_remove_disk(mddev, r)) {
mddev 3822 drivers/md/dm-raid.c if (mddev->pers->hot_add_disk(mddev, r)) {
mddev 3872 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 3873 drivers/md/dm-raid.c int ro = mddev->ro;
mddev 3875 drivers/md/dm-raid.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 3876 drivers/md/dm-raid.c mddev->ro = 0;
mddev 3877 drivers/md/dm-raid.c md_update_sb(mddev, 1);
mddev 3878 drivers/md/dm-raid.c mddev->ro = ro;
mddev 3891 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 3892 drivers/md/dm-raid.c struct md_personality *pers = mddev->pers;
mddev 3895 drivers/md/dm-raid.c set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
mddev 3906 drivers/md/dm-raid.c r = pers->check_reshape(mddev);
mddev 3917 drivers/md/dm-raid.c r = pers->start_reshape(mddev);
mddev 3938 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 3959 drivers/md/dm-raid.c if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
mddev 3960 drivers/md/dm-raid.c mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
mddev 3961 drivers/md/dm-raid.c r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors,
mddev 3969 drivers/md/dm-raid.c set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 3970 drivers/md/dm-raid.c if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
mddev 3971 drivers/md/dm-raid.c set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
mddev 3972 drivers/md/dm-raid.c mddev->resync_min = mddev->recovery_cp;
mddev 3979 drivers/md/dm-raid.c mddev_lock_nointr(mddev);
mddev 3981 drivers/md/dm-raid.c mddev_unlock(mddev);
mddev 3993 drivers/md/dm-raid.c struct mddev *mddev = &rs->md;
mddev 4006 drivers/md/dm-raid.c if (mddev->delta_disks < 0)
mddev 4009 drivers/md/dm-raid.c mddev_lock_nointr(mddev);
mddev 4010 drivers/md/dm-raid.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 4011 drivers/md/dm-raid.c mddev->ro = 0;
mddev 4012 drivers/md/dm-raid.c mddev->in_sync = 0;
mddev 4013 drivers/md/dm-raid.c mddev_resume(mddev);
mddev 4014 drivers/md/dm-raid.c mddev_unlock(mddev);
mddev 37 drivers/md/md-bitmap.c return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
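Lines 3872-3878 show rs_update_sbs() forcing the array writable just long enough to push superblocks out, then restoring the saved read-only state. A hypothetical condensation of that save/flag/update/restore sequence:

    struct mddev_like { int ro; unsigned long sb_flags; };

    #define SB_CHANGE_DEVS 0   /* placeholder for MD_SB_CHANGE_DEVS */

    static void update_sb(struct mddev_like *mddev)
    {
            /* stand-in for md_update_sb(mddev, 1) */
            (void)mddev;
    }

    static void update_sbs(struct mddev_like *mddev)
    {
            int ro = mddev->ro;                     /* remember current mode */

            mddev->sb_flags |= 1UL << SB_CHANGE_DEVS;
            mddev->ro = 0;                          /* writable for the update */
            update_sb(mddev);
            mddev->ro = ro;                         /* put the old mode back */
    }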
mddev 149 drivers/md/md-bitmap.c static int read_sb_page(struct mddev *mddev, loff_t offset,
mddev 158 drivers/md/md-bitmap.c rdev_for_each(rdev, mddev) {
mddev 176 drivers/md/md-bitmap.c static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
mddev 194 drivers/md/md-bitmap.c rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
mddev 197 drivers/md/md-bitmap.c rdev_dec_pending(rdev, mddev);
mddev 199 drivers/md/md-bitmap.c list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
mddev 216 drivers/md/md-bitmap.c struct mddev *mddev = bitmap->mddev;
mddev 221 drivers/md/md-bitmap.c while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
mddev 223 drivers/md/md-bitmap.c loff_t offset = mddev->bitmap_info.offset;
mddev 237 drivers/md/md-bitmap.c if (mddev->external) {
mddev 244 drivers/md/md-bitmap.c < (rdev->data_offset + mddev->dev_sectors
mddev 254 drivers/md/md-bitmap.c if (rdev->data_offset + mddev->dev_sectors
mddev 269 drivers/md/md-bitmap.c md_super_write(mddev, rdev,
mddev 276 drivers/md/md-bitmap.c if (wait && md_super_wait(mddev) < 0)
mddev 442 drivers/md/md-bitmap.c md_super_wait(bitmap->mddev);
mddev 451 drivers/md/md-bitmap.c if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
mddev 453 drivers/md/md-bitmap.c if (bitmap->mddev->bitmap_info.external)
mddev 458 drivers/md/md-bitmap.c sb->events = cpu_to_le64(bitmap->mddev->events);
mddev 459 drivers/md/md-bitmap.c if (bitmap->mddev->events < bitmap->events_cleared)
mddev 461 drivers/md/md-bitmap.c bitmap->events_cleared = bitmap->mddev->events;
mddev 469 drivers/md/md-bitmap.c sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
mddev 470 drivers/md/md-bitmap.c sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
mddev 472 drivers/md/md-bitmap.c sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
mddev 473 drivers/md/md-bitmap.c sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
mddev 474 drivers/md/md-bitmap.c sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
mddev 475 drivers/md/md-bitmap.c sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
mddev 537 drivers/md/md-bitmap.c chunksize = bitmap->mddev->bitmap_info.chunksize;
mddev 546 drivers/md/md-bitmap.c daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
mddev 552 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
mddev 558 drivers/md/md-bitmap.c write_behind = bitmap->mddev->bitmap_info.max_write_behind;
mddev 562 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.max_write_behind = write_behind;
mddev 565 drivers/md/md-bitmap.c sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
mddev 567 drivers/md/md-bitmap.c memcpy(sb->uuid, bitmap->mddev->uuid, 16);
mddev 571 drivers/md/md-bitmap.c bitmap->events_cleared = bitmap->mddev->events;
mddev 572 drivers/md/md-bitmap.c sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
mddev 573 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.nodes = 0;
mddev 591 drivers/md/md-bitmap.c loff_t offset = bitmap->mddev->bitmap_info.offset;
mddev 593 drivers/md/md-bitmap.c if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
mddev 610 drivers/md/md-bitmap.c sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
mddev 613 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.chunksize >> 9);
mddev 618 drivers/md/md-bitmap.c offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
mddev 630 drivers/md/md-bitmap.c err = read_sb_page(bitmap->mddev,
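Lines 610-618 locate a cluster node's on-disk bitmap: size one node's bitmap from resync_max_sectors and the chunk size, then step cluster_slot copies of (bm_blocks << 3) sectors past the base offset. The listing elides the intermediate rounding between those steps, so this sketch keeps only the visible shape and labels the rest an assumption:

    #include <stdint.h>

    typedef uint64_t sector_t;

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Shape of the per-slot placement visible at md-bitmap.c:610-618.
     * The kernel rounds bm_blocks further between these steps; treat
     * this as an illustrative approximation, not the exact layout. */
    static sector_t slot_bitmap_offset(sector_t base_offset, int cluster_slot,
                                       sector_t resync_max_sectors,
                                       unsigned long chunksize_bytes)
    {
            /* one bit per chunk: divide device sectors by chunk sectors */
            sector_t bm_blocks = DIV_ROUND_UP(resync_max_sectors,
                                              chunksize_bytes >> 9);

            /* each slot's copy sits (bm_blocks << 3) sectors further out */
            return base_offset + (sector_t)cluster_slot * (bm_blocks << 3);
    }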
mddev 650 drivers/md/md-bitmap.c strlcpy(bitmap->mddev->bitmap_info.cluster_name,
mddev 675 drivers/md/md-bitmap.c sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
mddev 677 drivers/md/md-bitmap.c if (bitmap->mddev->persistent) {
mddev 682 drivers/md/md-bitmap.c if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
mddev 688 drivers/md/md-bitmap.c if (!nodes && (events < bitmap->mddev->events)) {
mddev 691 drivers/md/md-bitmap.c (unsigned long long) bitmap->mddev->events);
mddev 701 drivers/md/md-bitmap.c strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
mddev 707 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.chunksize = chunksize;
mddev 709 drivers/md/md-bitmap.c err = md_setup_cluster(bitmap->mddev, nodes);
mddev 715 drivers/md/md-bitmap.c bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
mddev 722 drivers/md/md-bitmap.c bitmap->events_cleared = bitmap->mddev->events;
mddev 723 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.chunksize = chunksize;
mddev 724 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
mddev 725 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.max_write_behind = write_behind;
mddev 726 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.nodes = nodes;
mddev 727 drivers/md/md-bitmap.c if (bitmap->mddev->bitmap_info.space == 0 ||
mddev 728 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.space > sectors_reserved)
mddev 729 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.space = sectors_reserved;
mddev 733 drivers/md/md-bitmap.c md_cluster_stop(bitmap->mddev);
mddev 936 drivers/md/md-bitmap.c if (mddev_is_clustered(bitmap->mddev))
mddev 965 drivers/md/md-bitmap.c if (mddev_is_clustered(bitmap->mddev))
mddev 1030 drivers/md/md-bitmap.c if (bitmap->mddev->queue)
mddev 1031 drivers/md/md-bitmap.c blk_add_trace_msg(bitmap->mddev->queue,
mddev 1074 drivers/md/md-bitmap.c if (!file && !bitmap->mddev->bitmap_info.offset) {
mddev 1103 drivers/md/md-bitmap.c if (!bitmap->mddev->bitmap_info.external)
mddev 1106 drivers/md/md-bitmap.c if (mddev_is_clustered(bitmap->mddev))
mddev 1126 drivers/md/md-bitmap.c bitmap->mddev,
mddev 1127 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.offset,
mddev 1230 drivers/md/md-bitmap.c void md_bitmap_daemon_work(struct mddev *mddev)
mddev 1241 drivers/md/md-bitmap.c mutex_lock(&mddev->bitmap_info.mutex);
mddev 1242 drivers/md/md-bitmap.c bitmap = mddev->bitmap;
mddev 1244 drivers/md/md-bitmap.c mutex_unlock(&mddev->bitmap_info.mutex);
mddev 1248 drivers/md/md-bitmap.c + mddev->bitmap_info.daemon_sleep))
mddev 1253 drivers/md/md-bitmap.c mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
mddev 1258 drivers/md/md-bitmap.c if (bitmap->mddev->queue)
mddev 1259 drivers/md/md-bitmap.c blk_add_trace_msg(bitmap->mddev->queue,
mddev 1273 drivers/md/md-bitmap.c mddev->bitmap_info.external == 0) {
mddev 1349 drivers/md/md-bitmap.c mddev->thread->timeout =
mddev 1350 drivers/md/md-bitmap.c mddev->bitmap_info.daemon_sleep;
mddev 1351 drivers/md/md-bitmap.c mutex_unlock(&mddev->bitmap_info.mutex);
mddev 1409 drivers/md/md-bitmap.c bw, bitmap->mddev->bitmap_info.max_write_behind);
mddev 1470 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.max_write_behind);
mddev 1485 drivers/md/md-bitmap.c if (success && !bitmap->mddev->degraded &&
mddev 1486 drivers/md/md-bitmap.c bitmap->events_cleared < bitmap->mddev->events) {
mddev 1487 drivers/md/md-bitmap.c bitmap->events_cleared = bitmap->mddev->events;
mddev 1606 drivers/md/md-bitmap.c while (sector < bitmap->mddev->resync_max_sectors) {
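Two event-counter rules are visible above: a non-clustered bitmap whose recorded events lag the array's is stale (lines 688-691), and a fully successful write on a non-degraded array lets events_cleared catch up (lines 1485-1487). As a sketch:

    #include <stdint.h>

    /* Stale check from md-bitmap.c:688-691: with no cluster nodes, the
     * bitmap superblock's event count must keep pace with the array. */
    static int bitmap_is_stale(int nodes, uint64_t sb_events,
                               uint64_t mddev_events)
    {
            return !nodes && sb_events < mddev_events;
    }

    /* Advance rule from md-bitmap.c:1485-1487: only a clean write on a
     * non-degraded array may move events_cleared forward. */
    static void note_clean_write(uint64_t *events_cleared, int degraded,
                                 uint64_t mddev_events)
    {
            if (!degraded && *events_cleared < mddev_events)
                    *events_cleared = mddev_events;
    }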
mddev 1625 drivers/md/md-bitmap.c + bitmap->mddev->bitmap_info.daemon_sleep)))
mddev 1627 drivers/md/md-bitmap.c wait_event(bitmap->mddev->recovery_wait,
mddev 1628 drivers/md/md-bitmap.c atomic_read(&bitmap->mddev->recovery_active) == 0);
mddev 1630 drivers/md/md-bitmap.c bitmap->mddev->curr_resync_completed = sector;
mddev 1631 drivers/md/md-bitmap.c set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
mddev 1634 drivers/md/md-bitmap.c while (s < sector && s < bitmap->mddev->resync_max_sectors) {
mddev 1639 drivers/md/md-bitmap.c sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
mddev 1643 drivers/md/md-bitmap.c void md_bitmap_sync_with_cluster(struct mddev *mddev,
mddev 1647 drivers/md/md-bitmap.c struct bitmap *bitmap = mddev->bitmap;
mddev 1699 drivers/md/md-bitmap.c if (sec < bitmap->mddev->recovery_cp)
mddev 1704 drivers/md/md-bitmap.c bitmap->mddev->recovery_cp = sec;
mddev 1711 drivers/md/md-bitmap.c void md_bitmap_flush(struct mddev *mddev)
mddev 1713 drivers/md/md-bitmap.c struct bitmap *bitmap = mddev->bitmap;
mddev 1722 drivers/md/md-bitmap.c sleep = mddev->bitmap_info.daemon_sleep * 2;
mddev 1724 drivers/md/md-bitmap.c md_bitmap_daemon_work(mddev);
mddev 1726 drivers/md/md-bitmap.c md_bitmap_daemon_work(mddev);
mddev 1728 drivers/md/md-bitmap.c md_bitmap_daemon_work(mddev);
mddev 1746 drivers/md/md-bitmap.c if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
mddev 1747 drivers/md/md-bitmap.c bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
mddev 1748 drivers/md/md-bitmap.c md_cluster_stop(bitmap->mddev);
mddev 1771 drivers/md/md-bitmap.c void md_bitmap_wait_behind_writes(struct mddev *mddev)
mddev 1773 drivers/md/md-bitmap.c struct bitmap *bitmap = mddev->bitmap;
mddev 1778 drivers/md/md-bitmap.c mdname(mddev));
mddev 1785 drivers/md/md-bitmap.c void md_bitmap_destroy(struct mddev *mddev)
mddev 1787 drivers/md/md-bitmap.c struct bitmap *bitmap = mddev->bitmap;
mddev 1792 drivers/md/md-bitmap.c md_bitmap_wait_behind_writes(mddev);
mddev 1793 drivers/md/md-bitmap.c mempool_destroy(mddev->wb_info_pool);
mddev 1794 drivers/md/md-bitmap.c mddev->wb_info_pool = NULL;
mddev 1796 drivers/md/md-bitmap.c mutex_lock(&mddev->bitmap_info.mutex);
mddev 1797 drivers/md/md-bitmap.c spin_lock(&mddev->lock);
mddev 1798 drivers/md/md-bitmap.c mddev->bitmap = NULL; /* disconnect from the md device */
mddev 1799 drivers/md/md-bitmap.c spin_unlock(&mddev->lock);
mddev 1800 drivers/md/md-bitmap.c mutex_unlock(&mddev->bitmap_info.mutex);
mddev 1801 drivers/md/md-bitmap.c if (mddev->thread)
mddev 1802 drivers/md/md-bitmap.c mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
mddev 1812 drivers/md/md-bitmap.c struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
mddev 1815 drivers/md/md-bitmap.c sector_t blocks = mddev->resync_max_sectors;
mddev 1816 drivers/md/md-bitmap.c struct file *file = mddev->bitmap_info.file;
mddev 1822 drivers/md/md-bitmap.c BUG_ON(file && mddev->bitmap_info.offset);
mddev 1824 drivers/md/md-bitmap.c if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
mddev 1826 drivers/md/md-bitmap.c mdname(mddev));
mddev 1840 drivers/md/md-bitmap.c bitmap->mddev = mddev;
mddev 1843 drivers/md/md-bitmap.c if (mddev->kobj.sd)
mddev 1844 drivers/md/md-bitmap.c bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
mddev 1861 drivers/md/md-bitmap.c if (!mddev->bitmap_info.external) {
mddev 1866 drivers/md/md-bitmap.c if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
mddev 1872 drivers/md/md-bitmap.c if (mddev->bitmap_info.chunksize == 0 ||
mddev 1873 drivers/md/md-bitmap.c mddev->bitmap_info.daemon_sleep == 0)
mddev 1882 drivers/md/md-bitmap.c err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
mddev 1899 drivers/md/md-bitmap.c int md_bitmap_load(struct mddev *mddev)
mddev 1904 drivers/md/md-bitmap.c struct bitmap *bitmap = mddev->bitmap;
mddev 1910 drivers/md/md-bitmap.c rdev_for_each(rdev, mddev)
mddev 1911 drivers/md/md-bitmap.c mddev_create_wb_pool(mddev, rdev, true);
mddev 1913 drivers/md/md-bitmap.c if (mddev_is_clustered(mddev))
mddev 1914 drivers/md/md-bitmap.c md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
mddev 1921 drivers/md/md-bitmap.c while (sector < mddev->resync_max_sectors) {
mddev 1928 drivers/md/md-bitmap.c if (mddev->degraded == 0
mddev 1929 drivers/md/md-bitmap.c || bitmap->events_cleared == mddev->events)
mddev 1932 drivers/md/md-bitmap.c start = mddev->recovery_cp;
mddev 1934 drivers/md/md-bitmap.c mutex_lock(&mddev->bitmap_info.mutex);
mddev 1936 drivers/md/md-bitmap.c mutex_unlock(&mddev->bitmap_info.mutex);
mddev 1943 drivers/md/md-bitmap.c set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);
mddev 1945 drivers/md/md-bitmap.c mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
mddev 1946 drivers/md/md-bitmap.c md_wakeup_thread(mddev->thread);
mddev 1957 drivers/md/md-bitmap.c struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
mddev 1962 drivers/md/md-bitmap.c bitmap = md_bitmap_create(mddev, slot);
mddev 1981 drivers/md/md-bitmap.c int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
mddev 1989 drivers/md/md-bitmap.c bitmap = get_bitmap_from_slot(mddev, slot);
mddev 2003 drivers/md/md-bitmap.c md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
mddev 2004 drivers/md/md-bitmap.c md_bitmap_file_set_bit(mddev->bitmap, block);
mddev 2017 drivers/md/md-bitmap.c md_bitmap_unplug(mddev->bitmap);
mddev 2036 drivers/md/md-bitmap.c chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
mddev 2043 drivers/md/md-bitmap.c chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
mddev 2086 drivers/md/md-bitmap.c long space = bitmap->mddev->bitmap_info.space;
mddev 2093 drivers/md/md-bitmap.c if (!bitmap->mddev->bitmap_info.external)
mddev 2096 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.space = space;
mddev 2105 drivers/md/md-bitmap.c if (!bitmap->mddev->bitmap_info.external)
mddev 2113 drivers/md/md-bitmap.c if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
mddev 2115 drivers/md/md-bitmap.c !bitmap->mddev->bitmap_info.external,
mddev 2116 drivers/md/md-bitmap.c mddev_is_clustered(bitmap->mddev)
mddev 2133 drivers/md/md-bitmap.c bitmap->mddev->pers->quiesce(bitmap->mddev, 1);
mddev 2152 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.chunksize = 1 << (chunkshift +
mddev 2159 drivers/md/md-bitmap.c if (mddev_is_clustered(bitmap->mddev)) {
mddev 2178 drivers/md/md-bitmap.c bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
mddev 2249 drivers/md/md-bitmap.c bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
mddev 2258 drivers/md/md-bitmap.c location_show(struct mddev *mddev, char *page)
mddev 2261 drivers/md/md-bitmap.c if (mddev->bitmap_info.file)
mddev 2263 drivers/md/md-bitmap.c else if (mddev->bitmap_info.offset)
mddev 2264 drivers/md/md-bitmap.c len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
mddev 2272 drivers/md/md-bitmap.c location_store(struct mddev *mddev, const char *buf, size_t len)
mddev 2276 drivers/md/md-bitmap.c rv = mddev_lock(mddev);
mddev 2279 drivers/md/md-bitmap.c if (mddev->pers) {
mddev 2280 drivers/md/md-bitmap.c if (!mddev->pers->quiesce) {
mddev 2284 drivers/md/md-bitmap.c if (mddev->recovery || mddev->sync_thread) {
mddev 2290 drivers/md/md-bitmap.c if (mddev->bitmap || mddev->bitmap_info.file ||
mddev 2291 drivers/md/md-bitmap.c mddev->bitmap_info.offset) {
mddev 2297 drivers/md/md-bitmap.c if (mddev->pers) {
mddev 2298 drivers/md/md-bitmap.c mddev_suspend(mddev);
mddev 2299 drivers/md/md-bitmap.c md_bitmap_destroy(mddev);
mddev 2300 drivers/md/md-bitmap.c mddev_resume(mddev);
mddev 2302 drivers/md/md-bitmap.c mddev->bitmap_info.offset = 0;
mddev 2303 drivers/md/md-bitmap.c if (mddev->bitmap_info.file) {
mddev 2304 drivers/md/md-bitmap.c struct file *f = mddev->bitmap_info.file;
mddev 2305 drivers/md/md-bitmap.c mddev->bitmap_info.file = NULL;
mddev 2328 drivers/md/md-bitmap.c if (mddev->bitmap_info.external == 0 &&
mddev 2329 drivers/md/md-bitmap.c mddev->major_version == 0 &&
mddev 2330 drivers/md/md-bitmap.c offset != mddev->bitmap_info.default_offset) {
mddev 2334 drivers/md/md-bitmap.c mddev->bitmap_info.offset = offset;
mddev 2335 drivers/md/md-bitmap.c if (mddev->pers) {
mddev 2337 drivers/md/md-bitmap.c bitmap = md_bitmap_create(mddev, -1);
mddev 2338 drivers/md/md-bitmap.c mddev_suspend(mddev);
mddev 2342 drivers/md/md-bitmap.c mddev->bitmap = bitmap;
mddev 2343 drivers/md/md-bitmap.c rv = md_bitmap_load(mddev);
mddev 2345 drivers/md/md-bitmap.c mddev->bitmap_info.offset = 0;
mddev 2348 drivers/md/md-bitmap.c md_bitmap_destroy(mddev);
mddev 2349 drivers/md/md-bitmap.c mddev_resume(mddev);
mddev 2352 drivers/md/md-bitmap.c mddev_resume(mddev);
mddev 2356 drivers/md/md-bitmap.c if (!mddev->external) {
mddev 2360 drivers/md/md-bitmap.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 2361 drivers/md/md-bitmap.c md_wakeup_thread(mddev->thread);
mddev 2365 drivers/md/md-bitmap.c mddev_unlock(mddev);
mddev 2379 drivers/md/md-bitmap.c space_show(struct mddev *mddev, char *page)
mddev 2381 drivers/md/md-bitmap.c return sprintf(page, "%lu\n", mddev->bitmap_info.space);
mddev 2385 drivers/md/md-bitmap.c space_store(struct mddev *mddev, const char *buf, size_t len)
mddev 2397 drivers/md/md-bitmap.c if (mddev->bitmap &&
mddev 2398 drivers/md/md-bitmap.c sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
mddev 2404 drivers/md/md-bitmap.c mddev->bitmap_info.space = sectors;
mddev 2412 drivers/md/md-bitmap.c timeout_show(struct mddev *mddev, char *page)
mddev 2415 drivers/md/md-bitmap.c unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
mddev 2416 drivers/md/md-bitmap.c unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;
mddev 2426 drivers/md/md-bitmap.c timeout_store(struct mddev *mddev, const char *buf, size_t len)
mddev 2444 drivers/md/md-bitmap.c mddev->bitmap_info.daemon_sleep = timeout;
mddev 2445 drivers/md/md-bitmap.c if (mddev->thread) {
mddev 2450 drivers/md/md-bitmap.c if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
mddev 2451 drivers/md/md-bitmap.c mddev->thread->timeout = timeout;
mddev 2452 drivers/md/md-bitmap.c md_wakeup_thread(mddev->thread);
mddev 2462 drivers/md/md-bitmap.c backlog_show(struct mddev *mddev, char *page)
mddev 2464 drivers/md/md-bitmap.c return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
mddev 2468 drivers/md/md-bitmap.c backlog_store(struct mddev *mddev, const char *buf, size_t len)
mddev 2471 drivers/md/md-bitmap.c unsigned long old_mwb = mddev->bitmap_info.max_write_behind;
mddev 2477 drivers/md/md-bitmap.c mddev->bitmap_info.max_write_behind = backlog;
mddev 2478 drivers/md/md-bitmap.c if (!backlog && mddev->wb_info_pool) {
mddev 2480 drivers/md/md-bitmap.c mempool_destroy(mddev->wb_info_pool);
mddev 2481 drivers/md/md-bitmap.c mddev->wb_info_pool = NULL;
mddev 2482 drivers/md/md-bitmap.c } else if (backlog && !mddev->wb_info_pool) {
mddev 2486 drivers/md/md-bitmap.c rdev_for_each(rdev, mddev)
mddev 2487 drivers/md/md-bitmap.c mddev_create_wb_pool(mddev, rdev, false);
mddev 2490 drivers/md/md-bitmap.c md_bitmap_update_sb(mddev->bitmap);
mddev 2498 drivers/md/md-bitmap.c chunksize_show(struct mddev *mddev, char *page)
mddev 2500 drivers/md/md-bitmap.c return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
mddev 2504 drivers/md/md-bitmap.c chunksize_store(struct mddev *mddev, const char *buf, size_t len)
mddev 2509 drivers/md/md-bitmap.c if (mddev->bitmap)
mddev 2517 drivers/md/md-bitmap.c mddev->bitmap_info.chunksize = csize;
mddev 2524 drivers/md/md-bitmap.c static ssize_t metadata_show(struct mddev *mddev, char *page)
mddev 2526 drivers/md/md-bitmap.c if (mddev_is_clustered(mddev))
mddev 2528 drivers/md/md-bitmap.c return sprintf(page, "%s\n", (mddev->bitmap_info.external
mddev 2532 drivers/md/md-bitmap.c static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
mddev 2534 drivers/md/md-bitmap.c if (mddev->bitmap ||
mddev 2535 drivers/md/md-bitmap.c mddev->bitmap_info.file ||
mddev 2536 drivers/md/md-bitmap.c mddev->bitmap_info.offset)
mddev 2539 drivers/md/md-bitmap.c mddev->bitmap_info.external = 1;
mddev 2542 drivers/md/md-bitmap.c mddev->bitmap_info.external = 0;
mddev 2551 drivers/md/md-bitmap.c static ssize_t can_clear_show(struct mddev *mddev, char *page)
mddev 2554 drivers/md/md-bitmap.c spin_lock(&mddev->lock);
mddev 2555 drivers/md/md-bitmap.c if (mddev->bitmap)
mddev 2556 drivers/md/md-bitmap.c len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
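Lines 2415-2416 split bitmap_info.daemon_sleep (kept in jiffies) into whole seconds and leftover ticks for the sysfs timeout file. A sketch of that formatting; the HZ value is an assumption, and the millisecond conversion stands in for the kernel's jiffies_to_msecs():

    #include <stdio.h>

    #define HZ 250     /* assumption: kernels commonly build with 100/250/1000 */

    /* Render a jiffies count as "secs[.msec]\n", the way timeout_show
     * presents daemon_sleep (md-bitmap.c:2415-2416). */
    static int format_timeout(char *page, unsigned long daemon_sleep)
    {
            unsigned long secs = daemon_sleep / HZ;
            unsigned long jifs = daemon_sleep % HZ;
            int len = sprintf(page, "%lu", secs);

            if (jifs)   /* leftover ticks become a .NNN millisecond suffix */
                    len += sprintf(page + len, ".%03lu", jifs * 1000UL / HZ);
            return len + sprintf(page + len, "\n");
    }

    int main(void)
    {
            char page[32];
            format_timeout(page, 5 * HZ + HZ / 2);  /* prints "5.500" */
            fputs(page, stdout);
            return 0;
    }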
mddev 2560 drivers/md/md-bitmap.c spin_unlock(&mddev->lock);
mddev 2564 drivers/md/md-bitmap.c static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
mddev 2566 drivers/md/md-bitmap.c if (mddev->bitmap == NULL)
mddev 2569 drivers/md/md-bitmap.c mddev->bitmap->need_sync = 1;
mddev 2571 drivers/md/md-bitmap.c if (mddev->degraded)
mddev 2573 drivers/md/md-bitmap.c mddev->bitmap->need_sync = 0;
mddev 2583 drivers/md/md-bitmap.c behind_writes_used_show(struct mddev *mddev, char *page)
mddev 2586 drivers/md/md-bitmap.c spin_lock(&mddev->lock);
mddev 2587 drivers/md/md-bitmap.c if (mddev->bitmap == NULL)
mddev 2591 drivers/md/md-bitmap.c mddev->bitmap->behind_writes_used);
mddev 2592 drivers/md/md-bitmap.c spin_unlock(&mddev->lock);
mddev 2597 drivers/md/md-bitmap.c behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
mddev 2599 drivers/md/md-bitmap.c if (mddev->bitmap)
mddev 2600 drivers/md/md-bitmap.c mddev->bitmap->behind_writes_used = 0;
mddev 195 drivers/md/md-bitmap.h struct mddev *mddev; /* the md device that the bitmap is for */
mddev 239 drivers/md/md-bitmap.h struct bitmap *md_bitmap_create(struct mddev *mddev, int slot);
mddev 240 drivers/md/md-bitmap.h int md_bitmap_load(struct mddev *mddev);
mddev 241 drivers/md/md-bitmap.h void md_bitmap_flush(struct mddev *mddev);
mddev 242 drivers/md/md-bitmap.h void md_bitmap_destroy(struct mddev *mddev);
mddev 262 drivers/md/md-bitmap.h void md_bitmap_sync_with_cluster(struct mddev *mddev,
mddev 267 drivers/md/md-bitmap.h void md_bitmap_daemon_work(struct mddev *mddev);
mddev 271 drivers/md/md-bitmap.h struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot);
mddev 272 drivers/md/md-bitmap.h int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
mddev 275 drivers/md/md-bitmap.h void md_bitmap_wait_behind_writes(struct mddev *mddev);
mddev 27 drivers/md/md-cluster.c struct mddev *mddev; /* pointing back to mddev. */
mddev 61 drivers/md/md-cluster.c struct mddev *mddev; /* the md device which md_cluster_info belongs to */
mddev 150 drivers/md/md-cluster.c struct mddev *mddev)
mddev 162 drivers/md/md-cluster.c || test_bit(MD_CLOSING, &mddev->flags));
mddev 183 drivers/md/md-cluster.c static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
mddev 188 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 196 drivers/md/md-cluster.c res->mddev = mddev;
mddev 268 drivers/md/md-cluster.c static int read_resync_info(struct mddev *mddev,
mddev 272 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 288 drivers/md/md-cluster.c struct mddev *mddev = thread->mddev;
mddev 289 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 299 drivers/md/md-cluster.c bm_lockres = lockres_init(mddev, str, NULL, 1);
mddev 305 drivers/md/md-cluster.c ret = dlm_lock_sync_interruptible(bm_lockres, DLM_LOCK_PW, mddev);
mddev 311 drivers/md/md-cluster.c ret = md_bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
mddev 325 drivers/md/md-cluster.c if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
mddev 326 drivers/md/md-cluster.c test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev 327 drivers/md/md-cluster.c mddev->reshape_position != MaxSector)
mddev 328 drivers/md/md-cluster.c md_wakeup_thread(mddev->sync_thread);
mddev 331 drivers/md/md-cluster.c if (lo < mddev->recovery_cp)
mddev 332 drivers/md/md-cluster.c mddev->recovery_cp = lo;
mddev 335 drivers/md/md-cluster.c if (mddev->recovery_cp != MaxSector) {
mddev 341 drivers/md/md-cluster.c &mddev->recovery);
mddev 342 drivers/md/md-cluster.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 343 drivers/md/md-cluster.c md_wakeup_thread(mddev->thread);
mddev 354 drivers/md/md-cluster.c struct mddev *mddev = arg;
mddev 355 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 359 drivers/md/md-cluster.c static void __recover_slot(struct mddev *mddev, int slot)
mddev 361 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 366 drivers/md/md-cluster.c mddev, "recover");
mddev 377 drivers/md/md-cluster.c struct mddev *mddev = arg;
mddev 378 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 381 drivers/md/md-cluster.c mddev->bitmap_info.cluster_name,
mddev 386 drivers/md/md-cluster.c __recover_slot(mddev, slot->slot - 1);
mddev 393 drivers/md/md-cluster.c struct mddev *mddev = arg;
mddev 394 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 422 drivers/md/md-cluster.c struct md_cluster_info *cinfo = res->mddev->cluster_info;
mddev 432 drivers/md/md-cluster.c static void remove_suspend_info(struct mddev *mddev, int slot)
mddev 434 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 435 drivers/md/md-cluster.c mddev->pers->quiesce(mddev, 1);
mddev 440 drivers/md/md-cluster.c mddev->pers->quiesce(mddev, 0);
mddev 443 drivers/md/md-cluster.c static void process_suspend_info(struct mddev *mddev,
mddev 446 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 455 drivers/md/md-cluster.c clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
mddev 456 drivers/md/md-cluster.c remove_suspend_info(mddev, slot);
mddev 457 drivers/md/md-cluster.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 458 drivers/md/md-cluster.c md_wakeup_thread(mddev->thread);
mddev 462 drivers/md/md-cluster.c rdev_for_each(rdev, mddev)
mddev 490 drivers/md/md-cluster.c md_bitmap_sync_with_cluster(mddev, cinfo->sync_low,
mddev 495 drivers/md/md-cluster.c mddev->pers->quiesce(mddev, 1);
mddev 501 drivers/md/md-cluster.c mddev->pers->quiesce(mddev, 0);
mddev 504 drivers/md/md-cluster.c static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
mddev 507 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 519 drivers/md/md-cluster.c kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
mddev 526 drivers/md/md-cluster.c static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
mddev 529 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 530 drivers/md/md-cluster.c mddev->good_device_nr = le32_to_cpu(msg->raid_slot);
mddev 533 drivers/md/md-cluster.c wait_event(mddev->thread->wqueue,
mddev 534 drivers/md/md-cluster.c (got_lock = mddev_trylock(mddev)) ||
mddev 536 drivers/md/md-cluster.c md_reload_sb(mddev, mddev->good_device_nr);
mddev 538 drivers/md/md-cluster.c mddev_unlock(mddev);
mddev 541 drivers/md/md-cluster.c static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
mddev 546 drivers/md/md-cluster.c rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot));
mddev 549 drivers/md/md-cluster.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 550 drivers/md/md-cluster.c md_wakeup_thread(mddev->thread);
mddev 558 drivers/md/md-cluster.c static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
mddev 563 drivers/md/md-cluster.c rdev = md_find_rdev_nr_rcu(mddev, le32_to_cpu(msg->raid_slot));
mddev 572 drivers/md/md-cluster.c static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
mddev 576 drivers/md/md-cluster.c if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
mddev 581 drivers/md/md-cluster.c process_metadata_update(mddev, msg);
mddev 584 drivers/md/md-cluster.c set_capacity(mddev->gendisk, mddev->array_sectors);
mddev 585 drivers/md/md-cluster.c revalidate_disk(mddev->gendisk);
mddev 588 drivers/md/md-cluster.c set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
mddev 589 drivers/md/md-cluster.c process_suspend_info(mddev, le32_to_cpu(msg->slot),
mddev 594 drivers/md/md-cluster.c process_add_new_disk(mddev, msg);
mddev 597 drivers/md/md-cluster.c process_remove_disk(mddev, msg);
mddev 600 drivers/md/md-cluster.c process_readd_disk(mddev, msg);
mddev 603 drivers/md/md-cluster.c __recover_slot(mddev, le32_to_cpu(msg->slot));
mddev 606 drivers/md/md-cluster.c if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
mddev 607 drivers/md/md-cluster.c ret = md_bitmap_resize(mddev->bitmap,
mddev 623 drivers/md/md-cluster.c struct md_cluster_info *cinfo = thread->mddev->cluster_info;
mddev 639 drivers/md/md-cluster.c ret = process_recvd_msg(thread->mddev, &msg);
mddev 670 drivers/md/md-cluster.c struct mddev *mddev = cinfo->mddev;
mddev 683 drivers/md/md-cluster.c md_wakeup_thread(mddev->thread);
mddev 793 drivers/md/md-cluster.c static int gather_all_resync_info(struct mddev *mddev, int total_slots)
mddev 795 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 805 drivers/md/md-cluster.c bm_lockres = lockres_init(mddev, str, NULL, 1);
mddev 816 drivers/md/md-cluster.c if (read_resync_info(mddev, bm_lockres)) {
mddev 834 drivers/md/md-cluster.c ret = md_bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
mddev 840 drivers/md/md-cluster.c if ((hi > 0) && (lo < mddev->recovery_cp)) {
mddev 841 drivers/md/md-cluster.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 842 drivers/md/md-cluster.c mddev->recovery_cp = lo;
mddev 843 drivers/md/md-cluster.c md_check_recovery(mddev);
mddev 852 drivers/md/md-cluster.c static int join(struct mddev *mddev, int nodes)
mddev 869 drivers/md/md-cluster.c mddev->cluster_info = cinfo;
mddev 870 drivers/md/md-cluster.c cinfo->mddev = mddev;
mddev 873 drivers/md/md-cluster.c sprintf(str, "%pU", mddev->uuid);
mddev 874 drivers/md/md-cluster.c ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
mddev 876 drivers/md/md-cluster.c &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
mddev 888 drivers/md/md-cluster.c cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
mddev 893 drivers/md/md-cluster.c cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
mddev 896 drivers/md/md-cluster.c cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
mddev 899 drivers/md/md-cluster.c cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
mddev 909 drivers/md/md-cluster.c cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
mddev 926 drivers/md/md-cluster.c cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
mddev 937 drivers/md/md-cluster.c cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
mddev 956 drivers/md/md-cluster.c mddev->cluster_info = NULL;
mddev 961 drivers/md/md-cluster.c static void load_bitmaps(struct mddev *mddev, int total_slots)
mddev 963 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 966 drivers/md/md-cluster.c if (gather_all_resync_info(mddev, total_slots))
mddev 974 drivers/md/md-cluster.c static void resync_bitmap(struct mddev *mddev)
mddev 976 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 987 drivers/md/md-cluster.c static void unlock_all_bitmaps(struct mddev *mddev);
mddev 988 drivers/md/md-cluster.c static int leave(struct mddev *mddev)
mddev 990 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1003 drivers/md/md-cluster.c if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
mddev 1004 drivers/md/md-cluster.c (mddev->reshape_position != MaxSector &&
mddev 1005 drivers/md/md-cluster.c test_bit(MD_CLOSING, &mddev->flags)))
mddev 1006 drivers/md/md-cluster.c resync_bitmap(mddev);
mddev 1017 drivers/md/md-cluster.c unlock_all_bitmaps(mddev);
mddev 1027 drivers/md/md-cluster.c static int slot_number(struct mddev *mddev)
mddev 1029 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1040 drivers/md/md-cluster.c static int metadata_update_start(struct mddev *mddev)
mddev 1042 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1052 drivers/md/md-cluster.c md_wakeup_thread(mddev->thread);
mddev 1069 drivers/md/md-cluster.c static int metadata_update_finish(struct mddev *mddev)
mddev 1071 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1081 drivers/md/md-cluster.c rdev_for_each(rdev, mddev)
mddev 1096 drivers/md/md-cluster.c static void metadata_update_cancel(struct mddev *mddev)
mddev 1098 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1103 drivers/md/md-cluster.c static int update_bitmap_size(struct mddev *mddev, sector_t size)
mddev 1105 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1118 drivers/md/md-cluster.c static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize)
mddev 1123 drivers/md/md-cluster.c struct bitmap *bitmap = mddev->bitmap;
mddev 1131 drivers/md/md-cluster.c rv = update_bitmap_size(mddev, newsize);
mddev 1135 drivers/md/md-cluster.c for (i = 0; i < mddev->bitmap_info.nodes; i++) {
mddev 1136 drivers/md/md-cluster.c if (i == md_cluster_ops->slot_number(mddev))
mddev 1139 drivers/md/md-cluster.c bitmap = get_bitmap_from_slot(mddev, i);
mddev 1151 drivers/md/md-cluster.c bm_lockres = lockres_init(mddev, str, NULL, 1);
mddev 1173 drivers/md/md-cluster.c update_bitmap_size(mddev, oldsize);
mddev 1180 drivers/md/md-cluster.c static int cluster_check_sync_size(struct mddev *mddev)
mddev 1185 drivers/md/md-cluster.c int node_num = mddev->bitmap_info.nodes;
mddev 1186 drivers/md/md-cluster.c int current_slot = md_cluster_ops->slot_number(mddev);
mddev 1187 drivers/md/md-cluster.c struct bitmap *bitmap = mddev->bitmap;
mddev 1199 drivers/md/md-cluster.c bitmap = get_bitmap_from_slot(mddev, i);
mddev 1210 drivers/md/md-cluster.c bm_lockres = lockres_init(mddev, str, NULL, 1);
mddev 1247 drivers/md/md-cluster.c static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
mddev 1249 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1255 drivers/md/md-cluster.c md_update_sb(mddev, 1);
mddev 1260 drivers/md/md-cluster.c rdev_for_each(rdev, mddev)
mddev 1290 drivers/md/md-cluster.c if (cluster_check_sync_size(mddev) == 0) {
mddev 1297 drivers/md/md-cluster.c set_capacity(mddev->gendisk, mddev->array_sectors);
mddev 1298 drivers/md/md-cluster.c revalidate_disk(mddev->gendisk);
mddev 1301 drivers/md/md-cluster.c ret = mddev->pers->resize(mddev, old_dev_sectors);
mddev 1303 drivers/md/md-cluster.c revalidate_disk(mddev->gendisk);
mddev 1312 drivers/md/md-cluster.c static int resync_start(struct mddev *mddev)
mddev 1314 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1315 drivers/md/md-cluster.c return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev);
mddev 1318 drivers/md/md-cluster.c static void resync_info_get(struct mddev *mddev, sector_t *lo, sector_t *hi)
mddev 1320 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1328 drivers/md/md-cluster.c static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
mddev 1330 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1358 drivers/md/md-cluster.c static int resync_finish(struct mddev *mddev)
mddev 1360 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1363 drivers/md/md-cluster.c clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
mddev 1369 drivers/md/md-cluster.c if (!test_bit(MD_CLOSING, &mddev->flags))
mddev 1370 drivers/md/md-cluster.c ret = resync_info_update(mddev, 0, 0);
mddev 1375 drivers/md/md-cluster.c static int area_resyncing(struct mddev *mddev, int direction,
mddev 1378 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1396 drivers/md/md-cluster.c static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
mddev 1398 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1442 drivers/md/md-cluster.c static void add_new_disk_cancel(struct mddev *mddev)
mddev 1444 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1449 drivers/md/md-cluster.c static int new_disk_ack(struct mddev *mddev, bool ack)
mddev 1451 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1454 drivers/md/md-cluster.c pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev));
mddev 1464 drivers/md/md-cluster.c static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
mddev 1467 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1473 drivers/md/md-cluster.c static int lock_all_bitmaps(struct mddev *mddev)
mddev 1477 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1480 drivers/md/md-cluster.c kcalloc(mddev->bitmap_info.nodes - 1,
mddev 1487 drivers/md/md-cluster.c my_slot = slot_number(mddev);
mddev 1488 drivers/md/md-cluster.c for (slot = 0; slot < mddev->bitmap_info.nodes; slot++) {
mddev 1494 drivers/md/md-cluster.c cinfo->other_bitmap_lockres[i] = lockres_init(mddev, str, NULL, 1);
mddev 1508 drivers/md/md-cluster.c static void unlock_all_bitmaps(struct mddev *mddev)
mddev 1510 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1515 drivers/md/md-cluster.c for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) {
mddev 1529 drivers/md/md-cluster.c struct mddev *mddev = rdev->mddev;
mddev 1530 drivers/md/md-cluster.c struct md_cluster_info *cinfo = mddev->cluster_info;
mddev 1538 drivers/md/md-cluster.c for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
mddev 1541 drivers/md/md-cluster.c err = md_bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
mddev 1546 drivers/md/md-cluster.c if ((hi > 0) && (lo < mddev->recovery_cp))
mddev 1547 drivers/md/md-cluster.c mddev->recovery_cp = lo;
mddev 9 drivers/md/md-cluster.h struct mddev;
mddev 13 drivers/md/md-cluster.h int (*join)(struct mddev *mddev, int nodes);
mddev 14 drivers/md/md-cluster.h int (*leave)(struct mddev *mddev);
mddev 15 drivers/md/md-cluster.h int (*slot_number)(struct mddev *mddev);
mddev 16 drivers/md/md-cluster.h int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
mddev 17 drivers/md/md-cluster.h void (*resync_info_get)(struct mddev *mddev, sector_t *lo, sector_t *hi);
mddev 18 drivers/md/md-cluster.h int (*metadata_update_start)(struct mddev *mddev);
mddev 19 drivers/md/md-cluster.h int (*metadata_update_finish)(struct mddev *mddev);
mddev 20 drivers/md/md-cluster.h void (*metadata_update_cancel)(struct mddev *mddev);
mddev 21 drivers/md/md-cluster.h int (*resync_start)(struct mddev *mddev);
mddev 22 drivers/md/md-cluster.h int (*resync_finish)(struct mddev *mddev);
mddev 23 drivers/md/md-cluster.h int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
mddev 24 drivers/md/md-cluster.h int (*add_new_disk)(struct mddev *mddev, struct md_rdev *rdev);
mddev 25 drivers/md/md-cluster.h void (*add_new_disk_cancel)(struct mddev *mddev);
mddev 26 drivers/md/md-cluster.h int (*new_disk_ack)(struct mddev *mddev, bool ack);
mddev 27 drivers/md/md-cluster.h int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
mddev 28 drivers/md/md-cluster.h void (*load_bitmaps)(struct mddev *mddev, int total_slots);
mddev 30 drivers/md/md-cluster.h int (*resize_bitmaps)(struct mddev *mddev, sector_t newsize, sector_t oldsize);
mddev 31 drivers/md/md-cluster.h int (*lock_all_bitmaps)(struct mddev *mddev);
mddev 32 drivers/md/md-cluster.h void (*unlock_all_bitmaps)(struct mddev *mddev);
mddev 33 drivers/md/md-cluster.h void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
mddev 164 drivers/md/md-faulty.c static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
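The md-cluster.h block above is a kernel ops table: one struct of function pointers, each keyed on struct mddev *, that md core calls through (md_cluster_ops->slot_number(mddev) appears repeatedly in this listing). A compact userspace sketch of the same dispatch shape with a trimmed, hypothetical table:

    #include <stdio.h>

    struct mddev_like { const char *name; };

    /* Trimmed analogue of struct md_cluster_operations (md-cluster.h:13-33). */
    struct cluster_ops {
            int (*join)(struct mddev_like *mddev, int nodes);
            int (*leave)(struct mddev_like *mddev);
            int (*slot_number)(struct mddev_like *mddev);
    };

    static int my_join(struct mddev_like *mddev, int nodes)
    {
            printf("%s joining, %d nodes\n", mddev->name, nodes);
            return 0;
    }
    static int my_leave(struct mddev_like *mddev) { (void)mddev; return 0; }
    static int my_slot(struct mddev_like *mddev)  { (void)mddev; return 1; }

    /* The provider fills the table once; callers only ever go through
     * the pointers, as md core does via md_cluster_ops. */
    static const struct cluster_ops ops = {
            .join        = my_join,
            .leave       = my_leave,
            .slot_number = my_slot,
    };

    int main(void)
    {
            struct mddev_like md = { .name = "md0" };
            ops.join(&md, 3);
            printf("slot=%d\n", ops.slot_number(&md));
            return ops.leave(&md);
    }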
*bio) mddev 166 drivers/md/md-faulty.c struct faulty_conf *conf = mddev->private; mddev 208 drivers/md/md-faulty.c struct bio *b = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); mddev 221 drivers/md/md-faulty.c static void faulty_status(struct seq_file *seq, struct mddev *mddev) mddev 223 drivers/md/md-faulty.c struct faulty_conf *conf = mddev->private; mddev 254 drivers/md/md-faulty.c static int faulty_reshape(struct mddev *mddev) mddev 256 drivers/md/md-faulty.c int mode = mddev->new_layout & ModeMask; mddev 257 drivers/md/md-faulty.c int count = mddev->new_layout >> ModeShift; mddev 258 drivers/md/md-faulty.c struct faulty_conf *conf = mddev->private; mddev 260 drivers/md/md-faulty.c if (mddev->new_layout < 0) mddev 278 drivers/md/md-faulty.c mddev->new_layout = -1; mddev 279 drivers/md/md-faulty.c mddev->layout = -1; /* makes sure further changes come through */ mddev 283 drivers/md/md-faulty.c static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks) mddev 289 drivers/md/md-faulty.c return mddev->dev_sectors; mddev 294 drivers/md/md-faulty.c static int faulty_run(struct mddev *mddev) mddev 300 drivers/md/md-faulty.c if (md_check_no_bitmap(mddev)) mddev 313 drivers/md/md-faulty.c rdev_for_each(rdev, mddev) { mddev 315 drivers/md/md-faulty.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 319 drivers/md/md-faulty.c md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); mddev 320 drivers/md/md-faulty.c mddev->private = conf; mddev 322 drivers/md/md-faulty.c faulty_reshape(mddev); mddev 327 drivers/md/md-faulty.c static void faulty_free(struct mddev *mddev, void *priv) mddev 24 drivers/md/md-linear.c static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector) mddev 30 drivers/md/md-linear.c hi = mddev->raid_disks - 1; mddev 31 drivers/md/md-linear.c conf = mddev->private; mddev 55 drivers/md/md-linear.c static int linear_congested(struct mddev *mddev, int bits) mddev 61 drivers/md/md-linear.c conf = rcu_dereference(mddev->private); mddev 72 drivers/md/md-linear.c static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) mddev 77 drivers/md/md-linear.c conf = mddev->private; mddev 85 drivers/md/md-linear.c static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) mddev 99 drivers/md/md-linear.c rdev_for_each(rdev, mddev) { mddev 106 drivers/md/md-linear.c mdname(mddev)); mddev 111 drivers/md/md-linear.c if (mddev->chunk_sectors) { mddev 113 drivers/md/md-linear.c sector_div(sectors, mddev->chunk_sectors); mddev 114 drivers/md/md-linear.c rdev->sectors = sectors * mddev->chunk_sectors; mddev 117 drivers/md/md-linear.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 128 drivers/md/md-linear.c mdname(mddev)); mddev 133 drivers/md/md-linear.c blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); mddev 135 drivers/md/md-linear.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); mddev 167 drivers/md/md-linear.c static int linear_run (struct mddev *mddev) mddev 172 drivers/md/md-linear.c if (md_check_no_bitmap(mddev)) mddev 174 drivers/md/md-linear.c conf = linear_conf(mddev, mddev->raid_disks); mddev 178 drivers/md/md-linear.c mddev->private = conf; mddev 179 drivers/md/md-linear.c md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); mddev 181 drivers/md/md-linear.c ret = md_integrity_register(mddev); mddev 184 drivers/md/md-linear.c mddev->private = NULL; mddev 189 drivers/md/md-linear.c static int linear_add(struct mddev *mddev, struct md_rdev *rdev) mddev 201 drivers/md/md-linear.c if 
(rdev->saved_raid_disk != mddev->raid_disks) mddev 207 drivers/md/md-linear.c newconf = linear_conf(mddev,mddev->raid_disks+1); mddev 218 drivers/md/md-linear.c mddev_suspend(mddev); mddev 219 drivers/md/md-linear.c oldconf = rcu_dereference_protected(mddev->private, mddev 220 drivers/md/md-linear.c lockdep_is_held(&mddev->reconfig_mutex)); mddev 221 drivers/md/md-linear.c mddev->raid_disks++; mddev 222 drivers/md/md-linear.c WARN_ONCE(mddev->raid_disks != newconf->raid_disks, mddev 224 drivers/md/md-linear.c rcu_assign_pointer(mddev->private, newconf); mddev 225 drivers/md/md-linear.c md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); mddev 226 drivers/md/md-linear.c set_capacity(mddev->gendisk, mddev->array_sectors); mddev 227 drivers/md/md-linear.c mddev_resume(mddev); mddev 228 drivers/md/md-linear.c revalidate_disk(mddev->gendisk); mddev 233 drivers/md/md-linear.c static void linear_free(struct mddev *mddev, void *priv) mddev 240 drivers/md/md-linear.c static bool linear_make_request(struct mddev *mddev, struct bio *bio) mddev 248 drivers/md/md-linear.c && md_flush_request(mddev, bio)) mddev 251 drivers/md/md-linear.c tmp_dev = which_dev(mddev, bio_sector); mddev 268 drivers/md/md-linear.c GFP_NOIO, &mddev->bio_set); mddev 283 drivers/md/md-linear.c if (mddev->gendisk) mddev 285 drivers/md/md-linear.c bio, disk_devt(mddev->gendisk), mddev 287 drivers/md/md-linear.c mddev_check_writesame(mddev, bio); mddev 288 drivers/md/md-linear.c mddev_check_write_zeroes(mddev, bio); mddev 295 drivers/md/md-linear.c mdname(mddev), mddev 304 drivers/md/md-linear.c static void linear_status (struct seq_file *seq, struct mddev *mddev) mddev 306 drivers/md/md-linear.c seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2); mddev 309 drivers/md/md-linear.c static void linear_quiesce(struct mddev *mddev, int state) mddev 54 drivers/md/md-multipath.c struct mddev *mddev = mp_bh->mddev; mddev 55 drivers/md/md-multipath.c struct mpconf *conf = mddev->private; mddev 60 drivers/md/md-multipath.c md_wakeup_thread(mddev->thread); mddev 71 drivers/md/md-multipath.c struct mpconf *conf = mp_bh->mddev->private; mddev 81 drivers/md/md-multipath.c struct mpconf *conf = mp_bh->mddev->private; mddev 91 drivers/md/md-multipath.c md_error (mp_bh->mddev, rdev); mddev 98 drivers/md/md-multipath.c rdev_dec_pending(rdev, conf->mddev); mddev 101 drivers/md/md-multipath.c static bool multipath_make_request(struct mddev *mddev, struct bio * bio) mddev 103 drivers/md/md-multipath.c struct mpconf *conf = mddev->private; mddev 108 drivers/md/md-multipath.c && md_flush_request(mddev, bio)) mddev 114 drivers/md/md-multipath.c mp_bh->mddev = mddev; mddev 132 drivers/md/md-multipath.c mddev_check_writesame(mddev, &mp_bh->bio); mddev 133 drivers/md/md-multipath.c mddev_check_write_zeroes(mddev, &mp_bh->bio); mddev 138 drivers/md/md-multipath.c static void multipath_status(struct seq_file *seq, struct mddev *mddev) mddev 140 drivers/md/md-multipath.c struct mpconf *conf = mddev->private; mddev 144 drivers/md/md-multipath.c conf->raid_disks - mddev->degraded); mddev 154 drivers/md/md-multipath.c static int multipath_congested(struct mddev *mddev, int bits) mddev 156 drivers/md/md-multipath.c struct mpconf *conf = mddev->private; mddev 160 drivers/md/md-multipath.c for (i = 0; i < mddev->raid_disks ; i++) { mddev 179 drivers/md/md-multipath.c static void multipath_error (struct mddev *mddev, struct md_rdev *rdev) mddev 181 drivers/md/md-multipath.c struct mpconf *conf = mddev->private; mddev 184 drivers/md/md-multipath.c if 
(conf->raid_disks - mddev->degraded <= 1) { mddev 200 drivers/md/md-multipath.c mddev->degraded++; mddev 204 drivers/md/md-multipath.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 208 drivers/md/md-multipath.c conf->raid_disks - mddev->degraded); mddev 221 drivers/md/md-multipath.c pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, mddev 234 drivers/md/md-multipath.c static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev) mddev 236 drivers/md/md-multipath.c struct mpconf *conf = mddev->private; mddev 241 drivers/md/md-multipath.c int last = mddev->raid_disks - 1; mddev 250 drivers/md/md-multipath.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 253 drivers/md/md-multipath.c err = md_integrity_add_rdev(rdev, mddev); mddev 257 drivers/md/md-multipath.c mddev->degraded--; mddev 271 drivers/md/md-multipath.c static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev) mddev 273 drivers/md/md-multipath.c struct mpconf *conf = mddev->private; mddev 297 drivers/md/md-multipath.c err = md_integrity_register(mddev); mddev 315 drivers/md/md-multipath.c struct mddev *mddev = thread->mddev; mddev 319 drivers/md/md-multipath.c struct mpconf *conf = mddev->private; mddev 322 drivers/md/md-multipath.c md_check_recovery(mddev); mddev 357 drivers/md/md-multipath.c static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks) mddev 362 drivers/md/md-multipath.c return mddev->dev_sectors; mddev 365 drivers/md/md-multipath.c static int multipath_run (struct mddev *mddev) mddev 374 drivers/md/md-multipath.c if (md_check_no_bitmap(mddev)) mddev 377 drivers/md/md-multipath.c if (mddev->level != LEVEL_MULTIPATH) { mddev 379 drivers/md/md-multipath.c mdname(mddev), mddev->level); mddev 389 drivers/md/md-multipath.c mddev->private = conf; mddev 393 drivers/md/md-multipath.c conf->multipaths = kcalloc(mddev->raid_disks, mddev 400 drivers/md/md-multipath.c rdev_for_each(rdev, mddev) { mddev 403 drivers/md/md-multipath.c disk_idx >= mddev->raid_disks) mddev 408 drivers/md/md-multipath.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 415 drivers/md/md-multipath.c conf->raid_disks = mddev->raid_disks; mddev 416 drivers/md/md-multipath.c conf->mddev = mddev; mddev 422 drivers/md/md-multipath.c mdname(mddev)); mddev 425 drivers/md/md-multipath.c mddev->degraded = conf->raid_disks - working_disks; mddev 432 drivers/md/md-multipath.c mddev->thread = md_register_thread(multipathd, mddev, mddev 434 drivers/md/md-multipath.c if (!mddev->thread) mddev 438 drivers/md/md-multipath.c mdname(mddev), conf->raid_disks - mddev->degraded, mddev 439 drivers/md/md-multipath.c mddev->raid_disks); mddev 443 drivers/md/md-multipath.c md_set_array_sectors(mddev, multipath_size(mddev, 0, 0)); mddev 445 drivers/md/md-multipath.c if (md_integrity_register(mddev)) mddev 454 drivers/md/md-multipath.c mddev->private = NULL; mddev 459 drivers/md/md-multipath.c static void multipath_free(struct mddev *mddev, void *priv) mddev 10 drivers/md/md-multipath.h struct mddev *mddev; mddev 26 drivers/md/md-multipath.h struct mddev *mddev; mddev 91 drivers/md/md.c static int remove_and_add_spares(struct mddev *mddev, mddev 93 drivers/md/md.c static void mddev_detach(struct mddev *mddev); mddev 116 drivers/md/md.c static inline int speed_min(struct mddev *mddev) mddev 118 drivers/md/md.c return mddev->sync_speed_min ? 
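The md.c entries just above introduce speed_min() (its ternary is completed by the entry that follows): a per-array sync-speed floor that falls back to a global sysctl default when unset. Below is a minimal userspace sketch of that fallback idiom; the struct, harness, and numeric values are invented for illustration, and only the field and sysctl names mirror the quoted source.

    /* Sketch of the md speed_min()/speed_max() fallback idiom quoted above:
     * a zero per-array value means "use the global sysctl default".
     * The struct and main() harness are illustrative, not kernel code. */
    #include <stdio.h>

    static int sysctl_speed_limit_min = 1000;   /* global default (KB/s) */

    struct array_limits {
        int sync_speed_min;                     /* 0 => not overridden */
    };

    static inline int speed_min(const struct array_limits *a)
    {
        return a->sync_speed_min ? a->sync_speed_min : sysctl_speed_limit_min;
    }

    int main(void)
    {
        struct array_limits dflt = { 0 }, tuned = { 50000 };

        printf("default: %d KB/s\n", speed_min(&dflt));   /* prints 1000 */
        printf("tuned:   %d KB/s\n", speed_min(&tuned));  /* prints 50000 */
        return 0;
    }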
mddev 119 drivers/md/md.c mddev->sync_speed_min : sysctl_speed_limit_min; mddev 122 drivers/md/md.c static inline int speed_max(struct mddev *mddev) mddev 124 drivers/md/md.c return mddev->sync_speed_max ? mddev 125 drivers/md/md.c mddev->sync_speed_max : sysctl_speed_limit_max; mddev 145 drivers/md/md.c void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, mddev 148 drivers/md/md.c if (mddev->bitmap_info.max_write_behind == 0) mddev 154 drivers/md/md.c if (mddev->wb_info_pool == NULL) { mddev 158 drivers/md/md.c mddev_suspend(mddev); mddev 160 drivers/md/md.c mddev->wb_info_pool = mempool_create_kmalloc_pool(NR_WB_INFOS, mddev 163 drivers/md/md.c if (!mddev->wb_info_pool) mddev 166 drivers/md/md.c mddev_resume(mddev); mddev 174 drivers/md/md.c static void mddev_destroy_wb_pool(struct mddev *mddev, struct md_rdev *rdev) mddev 179 drivers/md/md.c if (mddev->wb_info_pool) { mddev 186 drivers/md/md.c rdev_for_each(temp, mddev) mddev 191 drivers/md/md.c mddev_suspend(rdev->mddev); mddev 192 drivers/md/md.c mempool_destroy(mddev->wb_info_pool); mddev 193 drivers/md/md.c mddev->wb_info_pool = NULL; mddev 194 drivers/md/md.c mddev_resume(rdev->mddev); mddev 254 drivers/md/md.c struct mddev *mddev) mddev 256 drivers/md/md.c if (!mddev || !bioset_initialized(&mddev->bio_set)) mddev 259 drivers/md/md.c return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); mddev 263 drivers/md/md.c static struct bio *md_bio_alloc_sync(struct mddev *mddev) mddev 265 drivers/md/md.c if (!mddev || !bioset_initialized(&mddev->sync_set)) mddev 268 drivers/md/md.c return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); mddev 283 drivers/md/md.c void md_new_event(struct mddev *mddev) mddev 310 drivers/md/md.c mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\ mddev 313 drivers/md/md.c _mddev = list_entry(_tmp, struct mddev, all_mddevs); \ mddev 326 drivers/md/md.c static bool is_suspended(struct mddev *mddev, struct bio *bio) mddev 328 drivers/md/md.c if (mddev->suspended) mddev 332 drivers/md/md.c if (mddev->suspend_lo >= mddev->suspend_hi) mddev 334 drivers/md/md.c if (bio->bi_iter.bi_sector >= mddev->suspend_hi) mddev 336 drivers/md/md.c if (bio_end_sector(bio) < mddev->suspend_lo) mddev 341 drivers/md/md.c void md_handle_request(struct mddev *mddev, struct bio *bio) mddev 345 drivers/md/md.c if (is_suspended(mddev, bio)) { mddev 348 drivers/md/md.c prepare_to_wait(&mddev->sb_wait, &__wait, mddev 350 drivers/md/md.c if (!is_suspended(mddev, bio)) mddev 356 drivers/md/md.c finish_wait(&mddev->sb_wait, &__wait); mddev 358 drivers/md/md.c atomic_inc(&mddev->active_io); mddev 361 drivers/md/md.c if (!mddev->pers->make_request(mddev, bio)) { mddev 362 drivers/md/md.c atomic_dec(&mddev->active_io); mddev 363 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 367 drivers/md/md.c if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) mddev 368 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 376 drivers/md/md.c struct mddev *mddev = q->queuedata; mddev 379 drivers/md/md.c if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { mddev 386 drivers/md/md.c if (mddev == NULL || mddev->pers == NULL) { mddev 390 drivers/md/md.c if (mddev->ro == 1 && unlikely(rw == WRITE)) { mddev 405 drivers/md/md.c md_handle_request(mddev, bio); mddev 408 drivers/md/md.c part_stat_inc(&mddev->gendisk->part0, ios[sgrp]); mddev 409 drivers/md/md.c part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors); mddev 421 drivers/md/md.c void mddev_suspend(struct mddev *mddev) mddev 423 drivers/md/md.c 
WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); mddev 424 drivers/md/md.c lockdep_assert_held(&mddev->reconfig_mutex); mddev 425 drivers/md/md.c if (mddev->suspended++) mddev 428 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 429 drivers/md/md.c set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags); mddev 431 drivers/md/md.c wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); mddev 432 drivers/md/md.c mddev->pers->quiesce(mddev, 1); mddev 433 drivers/md/md.c clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags); mddev 434 drivers/md/md.c wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags)); mddev 436 drivers/md/md.c del_timer_sync(&mddev->safemode_timer); mddev 440 drivers/md/md.c void mddev_resume(struct mddev *mddev) mddev 442 drivers/md/md.c lockdep_assert_held(&mddev->reconfig_mutex); mddev 443 drivers/md/md.c if (--mddev->suspended) mddev 445 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 446 drivers/md/md.c mddev->pers->quiesce(mddev, 0); mddev 448 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 449 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 450 drivers/md/md.c md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ mddev 454 drivers/md/md.c int mddev_congested(struct mddev *mddev, int bits) mddev 456 drivers/md/md.c struct md_personality *pers = mddev->pers; mddev 460 drivers/md/md.c if (mddev->suspended) mddev 463 drivers/md/md.c ret = pers->congested(mddev, bits); mddev 470 drivers/md/md.c struct mddev *mddev = data; mddev 471 drivers/md/md.c return mddev_congested(mddev, bits); mddev 481 drivers/md/md.c struct mddev *mddev = rdev->mddev; mddev 483 drivers/md/md.c rdev_dec_pending(rdev, mddev); mddev 485 drivers/md/md.c if (atomic_dec_and_test(&mddev->flush_pending)) { mddev 487 drivers/md/md.c queue_work(md_wq, &mddev->flush_work); mddev 496 drivers/md/md.c struct mddev *mddev = container_of(ws, struct mddev, flush_work); mddev 499 drivers/md/md.c mddev->start_flush = ktime_get_boottime(); mddev 500 drivers/md/md.c INIT_WORK(&mddev->flush_work, md_submit_flush_data); mddev 501 drivers/md/md.c atomic_set(&mddev->flush_pending, 1); mddev 503 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) mddev 514 drivers/md/md.c bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); mddev 519 drivers/md/md.c atomic_inc(&mddev->flush_pending); mddev 522 drivers/md/md.c rdev_dec_pending(rdev, mddev); mddev 525 drivers/md/md.c if (atomic_dec_and_test(&mddev->flush_pending)) mddev 526 drivers/md/md.c queue_work(md_wq, &mddev->flush_work); mddev 531 drivers/md/md.c struct mddev *mddev = container_of(ws, struct mddev, flush_work); mddev 532 drivers/md/md.c struct bio *bio = mddev->flush_bio; mddev 540 drivers/md/md.c mddev->last_flush = mddev->start_flush; mddev 541 drivers/md/md.c mddev->flush_bio = NULL; mddev 542 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 549 drivers/md/md.c md_handle_request(mddev, bio); mddev 559 drivers/md/md.c bool md_flush_request(struct mddev *mddev, struct bio *bio) mddev 562 drivers/md/md.c spin_lock_irq(&mddev->lock); mddev 563 drivers/md/md.c wait_event_lock_irq(mddev->sb_wait, mddev 564 drivers/md/md.c !mddev->flush_bio || mddev 565 drivers/md/md.c ktime_after(mddev->last_flush, start), mddev 566 drivers/md/md.c mddev->lock); mddev 567 drivers/md/md.c if (!ktime_after(mddev->last_flush, start)) { mddev 568 drivers/md/md.c WARN_ON(mddev->flush_bio); mddev 569 drivers/md/md.c mddev->flush_bio = bio; mddev 572 drivers/md/md.c spin_unlock_irq(&mddev->lock); mddev 575 drivers/md/md.c 
INIT_WORK(&mddev->flush_work, submit_flushes); mddev 576 drivers/md/md.c queue_work(md_wq, &mddev->flush_work); mddev 591 drivers/md/md.c static inline struct mddev *mddev_get(struct mddev *mddev) mddev 593 drivers/md/md.c atomic_inc(&mddev->active); mddev 594 drivers/md/md.c return mddev; mddev 599 drivers/md/md.c static void mddev_put(struct mddev *mddev) mddev 601 drivers/md/md.c if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) mddev 603 drivers/md/md.c if (!mddev->raid_disks && list_empty(&mddev->disks) && mddev 604 drivers/md/md.c mddev->ctime == 0 && !mddev->hold_active) { mddev 607 drivers/md/md.c list_del_init(&mddev->all_mddevs); mddev 614 drivers/md/md.c INIT_WORK(&mddev->del_work, mddev_delayed_delete); mddev 615 drivers/md/md.c queue_work(md_misc_wq, &mddev->del_work); mddev 622 drivers/md/md.c void mddev_init(struct mddev *mddev) mddev 624 drivers/md/md.c kobject_init(&mddev->kobj, &md_ktype); mddev 625 drivers/md/md.c mutex_init(&mddev->open_mutex); mddev 626 drivers/md/md.c mutex_init(&mddev->reconfig_mutex); mddev 627 drivers/md/md.c mutex_init(&mddev->bitmap_info.mutex); mddev 628 drivers/md/md.c INIT_LIST_HEAD(&mddev->disks); mddev 629 drivers/md/md.c INIT_LIST_HEAD(&mddev->all_mddevs); mddev 630 drivers/md/md.c timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); mddev 631 drivers/md/md.c atomic_set(&mddev->active, 1); mddev 632 drivers/md/md.c atomic_set(&mddev->openers, 0); mddev 633 drivers/md/md.c atomic_set(&mddev->active_io, 0); mddev 634 drivers/md/md.c spin_lock_init(&mddev->lock); mddev 635 drivers/md/md.c atomic_set(&mddev->flush_pending, 0); mddev 636 drivers/md/md.c init_waitqueue_head(&mddev->sb_wait); mddev 637 drivers/md/md.c init_waitqueue_head(&mddev->recovery_wait); mddev 638 drivers/md/md.c mddev->reshape_position = MaxSector; mddev 639 drivers/md/md.c mddev->reshape_backwards = 0; mddev 640 drivers/md/md.c mddev->last_sync_action = "none"; mddev 641 drivers/md/md.c mddev->resync_min = 0; mddev 642 drivers/md/md.c mddev->resync_max = MaxSector; mddev 643 drivers/md/md.c mddev->level = LEVEL_NONE; mddev 647 drivers/md/md.c static struct mddev *mddev_find(dev_t unit) mddev 649 drivers/md/md.c struct mddev *mddev, *new = NULL; mddev 658 drivers/md/md.c list_for_each_entry(mddev, &all_mddevs, all_mddevs) mddev 659 drivers/md/md.c if (mddev->unit == unit) { mddev 660 drivers/md/md.c mddev_get(mddev); mddev 663 drivers/md/md.c return mddev; mddev 691 drivers/md/md.c list_for_each_entry(mddev, &all_mddevs, all_mddevs) mddev 692 drivers/md/md.c if (mddev->unit == dev) { mddev 723 drivers/md/md.c void mddev_unlock(struct mddev *mddev) mddev 725 drivers/md/md.c if (mddev->to_remove) { mddev 738 drivers/md/md.c struct attribute_group *to_remove = mddev->to_remove; mddev 739 drivers/md/md.c mddev->to_remove = NULL; mddev 740 drivers/md/md.c mddev->sysfs_active = 1; mddev 741 drivers/md/md.c mutex_unlock(&mddev->reconfig_mutex); mddev 743 drivers/md/md.c if (mddev->kobj.sd) { mddev 745 drivers/md/md.c sysfs_remove_group(&mddev->kobj, to_remove); mddev 746 drivers/md/md.c if (mddev->pers == NULL || mddev 747 drivers/md/md.c mddev->pers->sync_request == NULL) { mddev 748 drivers/md/md.c sysfs_remove_group(&mddev->kobj, &md_redundancy_group); mddev 749 drivers/md/md.c if (mddev->sysfs_action) mddev 750 drivers/md/md.c sysfs_put(mddev->sysfs_action); mddev 751 drivers/md/md.c mddev->sysfs_action = NULL; mddev 754 drivers/md/md.c mddev->sysfs_active = 0; mddev 756 drivers/md/md.c mutex_unlock(&mddev->reconfig_mutex); mddev 762 drivers/md/md.c 
md_wakeup_thread(mddev->thread); mddev 763 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 768 drivers/md/md.c struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) mddev 772 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) mddev 780 drivers/md/md.c static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) mddev 784 drivers/md/md.c rdev_for_each(rdev, mddev) mddev 791 drivers/md/md.c struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) mddev 795 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) mddev 850 drivers/md/md.c struct mddev *mddev = rdev->mddev; mddev 854 drivers/md/md.c md_error(mddev, rdev); mddev 857 drivers/md/md.c set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); mddev 863 drivers/md/md.c if (atomic_dec_and_test(&mddev->pending_writes)) mddev 864 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 865 drivers/md/md.c rdev_dec_pending(rdev, mddev); mddev 869 drivers/md/md.c void md_super_write(struct mddev *mddev, struct md_rdev *rdev, mddev 887 drivers/md/md.c bio = md_bio_alloc_sync(mddev); mddev 897 drivers/md/md.c if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && mddev 903 drivers/md/md.c atomic_inc(&mddev->pending_writes); mddev 907 drivers/md/md.c int md_super_wait(struct mddev *mddev) mddev 910 drivers/md/md.c wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); mddev 911 drivers/md/md.c if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) mddev 919 drivers/md/md.c struct bio *bio = md_bio_alloc_sync(rdev->mddev); mddev 929 drivers/md/md.c else if (rdev->mddev->reshape_position != MaxSector && mddev 930 drivers/md/md.c (rdev->mddev->reshape_backwards == mddev 931 drivers/md/md.c (sector >= rdev->mddev->reshape_position))) mddev 1072 drivers/md/md.c int (*validate_super)(struct mddev *mddev, mddev 1074 drivers/md/md.c void (*sync_super)(struct mddev *mddev, mddev 1090 drivers/md/md.c int md_check_no_bitmap(struct mddev *mddev) mddev 1092 drivers/md/md.c if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) mddev 1095 drivers/md/md.c mdname(mddev), mddev->pers->name); mddev 1212 drivers/md/md.c static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) mddev 1224 drivers/md/md.c if (mddev->raid_disks == 0) { mddev 1225 drivers/md/md.c mddev->major_version = 0; mddev 1226 drivers/md/md.c mddev->minor_version = sb->minor_version; mddev 1227 drivers/md/md.c mddev->patch_version = sb->patch_version; mddev 1228 drivers/md/md.c mddev->external = 0; mddev 1229 drivers/md/md.c mddev->chunk_sectors = sb->chunk_size >> 9; mddev 1230 drivers/md/md.c mddev->ctime = sb->ctime; mddev 1231 drivers/md/md.c mddev->utime = sb->utime; mddev 1232 drivers/md/md.c mddev->level = sb->level; mddev 1233 drivers/md/md.c mddev->clevel[0] = 0; mddev 1234 drivers/md/md.c mddev->layout = sb->layout; mddev 1235 drivers/md/md.c mddev->raid_disks = sb->raid_disks; mddev 1236 drivers/md/md.c mddev->dev_sectors = ((sector_t)sb->size) * 2; mddev 1237 drivers/md/md.c mddev->events = ev1; mddev 1238 drivers/md/md.c mddev->bitmap_info.offset = 0; mddev 1239 drivers/md/md.c mddev->bitmap_info.space = 0; mddev 1241 drivers/md/md.c mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; mddev 1242 drivers/md/md.c mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); mddev 1243 drivers/md/md.c mddev->reshape_backwards = 0; mddev 1245 drivers/md/md.c if (mddev->minor_version >= 91) { mddev 1246 drivers/md/md.c mddev->reshape_position = sb->reshape_position; mddev 1247 drivers/md/md.c mddev->delta_disks = sb->delta_disks; mddev 1248 
drivers/md/md.c mddev->new_level = sb->new_level; mddev 1249 drivers/md/md.c mddev->new_layout = sb->new_layout; mddev 1250 drivers/md/md.c mddev->new_chunk_sectors = sb->new_chunk >> 9; mddev 1251 drivers/md/md.c if (mddev->delta_disks < 0) mddev 1252 drivers/md/md.c mddev->reshape_backwards = 1; mddev 1254 drivers/md/md.c mddev->reshape_position = MaxSector; mddev 1255 drivers/md/md.c mddev->delta_disks = 0; mddev 1256 drivers/md/md.c mddev->new_level = mddev->level; mddev 1257 drivers/md/md.c mddev->new_layout = mddev->layout; mddev 1258 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors; mddev 1260 drivers/md/md.c if (mddev->level == 0) mddev 1261 drivers/md/md.c mddev->layout = -1; mddev 1264 drivers/md/md.c mddev->recovery_cp = MaxSector; mddev 1268 drivers/md/md.c mddev->recovery_cp = sb->recovery_cp; mddev 1270 drivers/md/md.c mddev->recovery_cp = 0; mddev 1273 drivers/md/md.c memcpy(mddev->uuid+0, &sb->set_uuid0, 4); mddev 1274 drivers/md/md.c memcpy(mddev->uuid+4, &sb->set_uuid1, 4); mddev 1275 drivers/md/md.c memcpy(mddev->uuid+8, &sb->set_uuid2, 4); mddev 1276 drivers/md/md.c memcpy(mddev->uuid+12,&sb->set_uuid3, 4); mddev 1278 drivers/md/md.c mddev->max_disks = MD_SB_DISKS; mddev 1281 drivers/md/md.c mddev->bitmap_info.file == NULL) { mddev 1282 drivers/md/md.c mddev->bitmap_info.offset = mddev 1283 drivers/md/md.c mddev->bitmap_info.default_offset; mddev 1284 drivers/md/md.c mddev->bitmap_info.space = mddev 1285 drivers/md/md.c mddev->bitmap_info.default_space; mddev 1288 drivers/md/md.c } else if (mddev->pers == NULL) { mddev 1294 drivers/md/md.c if (ev1 < mddev->events) mddev 1296 drivers/md/md.c } else if (mddev->bitmap) { mddev 1300 drivers/md/md.c if (ev1 < mddev->bitmap->events_cleared) mddev 1302 drivers/md/md.c if (ev1 < mddev->events) mddev 1305 drivers/md/md.c if (ev1 < mddev->events) mddev 1310 drivers/md/md.c if (mddev->level != LEVEL_MULTIPATH) { mddev 1324 drivers/md/md.c if (mddev->minor_version >= 91) { mddev 1341 drivers/md/md.c static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) mddev 1345 drivers/md/md.c int next_spare = mddev->raid_disks; mddev 1367 drivers/md/md.c sb->major_version = mddev->major_version; mddev 1368 drivers/md/md.c sb->patch_version = mddev->patch_version; mddev 1370 drivers/md/md.c memcpy(&sb->set_uuid0, mddev->uuid+0, 4); mddev 1371 drivers/md/md.c memcpy(&sb->set_uuid1, mddev->uuid+4, 4); mddev 1372 drivers/md/md.c memcpy(&sb->set_uuid2, mddev->uuid+8, 4); mddev 1373 drivers/md/md.c memcpy(&sb->set_uuid3, mddev->uuid+12,4); mddev 1375 drivers/md/md.c sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); mddev 1376 drivers/md/md.c sb->level = mddev->level; mddev 1377 drivers/md/md.c sb->size = mddev->dev_sectors / 2; mddev 1378 drivers/md/md.c sb->raid_disks = mddev->raid_disks; mddev 1379 drivers/md/md.c sb->md_minor = mddev->md_minor; mddev 1381 drivers/md/md.c sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); mddev 1383 drivers/md/md.c sb->events_hi = (mddev->events>>32); mddev 1384 drivers/md/md.c sb->events_lo = (u32)mddev->events; mddev 1386 drivers/md/md.c if (mddev->reshape_position == MaxSector) mddev 1390 drivers/md/md.c sb->reshape_position = mddev->reshape_position; mddev 1391 drivers/md/md.c sb->new_level = mddev->new_level; mddev 1392 drivers/md/md.c sb->delta_disks = mddev->delta_disks; mddev 1393 drivers/md/md.c sb->new_layout = mddev->new_layout; mddev 1394 drivers/md/md.c sb->new_chunk = mddev->new_chunk_sectors << 9; mddev 1396 drivers/md/md.c mddev->minor_version = 
sb->minor_version; mddev 1397 drivers/md/md.c if (mddev->in_sync) mddev 1399 drivers/md/md.c sb->recovery_cp = mddev->recovery_cp; mddev 1400 drivers/md/md.c sb->cp_events_hi = (mddev->events>>32); mddev 1401 drivers/md/md.c sb->cp_events_lo = (u32)mddev->events; mddev 1402 drivers/md/md.c if (mddev->recovery_cp == MaxSector) mddev 1407 drivers/md/md.c sb->layout = mddev->layout; mddev 1408 drivers/md/md.c sb->chunk_size = mddev->chunk_sectors << 9; mddev 1410 drivers/md/md.c if (mddev->bitmap && mddev->bitmap_info.file == NULL) mddev 1414 drivers/md/md.c rdev_for_each(rdev2, mddev) { mddev 1462 drivers/md/md.c for (i=0 ; i < mddev->raid_disks ; i++) { mddev 1488 drivers/md/md.c if (num_sectors && num_sectors < rdev->mddev->dev_sectors) mddev 1490 drivers/md/md.c if (rdev->mddev->bitmap_info.offset) mddev 1498 drivers/md/md.c if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) mddev 1501 drivers/md/md.c md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, mddev 1503 drivers/md/md.c } while (md_super_wait(rdev->mddev) < 0); mddev 1727 drivers/md/md.c static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) mddev 1738 drivers/md/md.c if (mddev->raid_disks == 0) { mddev 1739 drivers/md/md.c mddev->major_version = 1; mddev 1740 drivers/md/md.c mddev->patch_version = 0; mddev 1741 drivers/md/md.c mddev->external = 0; mddev 1742 drivers/md/md.c mddev->chunk_sectors = le32_to_cpu(sb->chunksize); mddev 1743 drivers/md/md.c mddev->ctime = le64_to_cpu(sb->ctime); mddev 1744 drivers/md/md.c mddev->utime = le64_to_cpu(sb->utime); mddev 1745 drivers/md/md.c mddev->level = le32_to_cpu(sb->level); mddev 1746 drivers/md/md.c mddev->clevel[0] = 0; mddev 1747 drivers/md/md.c mddev->layout = le32_to_cpu(sb->layout); mddev 1748 drivers/md/md.c mddev->raid_disks = le32_to_cpu(sb->raid_disks); mddev 1749 drivers/md/md.c mddev->dev_sectors = le64_to_cpu(sb->size); mddev 1750 drivers/md/md.c mddev->events = ev1; mddev 1751 drivers/md/md.c mddev->bitmap_info.offset = 0; mddev 1752 drivers/md/md.c mddev->bitmap_info.space = 0; mddev 1756 drivers/md/md.c mddev->bitmap_info.default_offset = 1024 >> 9; mddev 1757 drivers/md/md.c mddev->bitmap_info.default_space = (4096-1024) >> 9; mddev 1758 drivers/md/md.c mddev->reshape_backwards = 0; mddev 1760 drivers/md/md.c mddev->recovery_cp = le64_to_cpu(sb->resync_offset); mddev 1761 drivers/md/md.c memcpy(mddev->uuid, sb->set_uuid, 16); mddev 1763 drivers/md/md.c mddev->max_disks = (4096-256)/2; mddev 1766 drivers/md/md.c mddev->bitmap_info.file == NULL) { mddev 1767 drivers/md/md.c mddev->bitmap_info.offset = mddev 1774 drivers/md/md.c if (mddev->minor_version > 0) mddev 1775 drivers/md/md.c mddev->bitmap_info.space = 0; mddev 1776 drivers/md/md.c else if (mddev->bitmap_info.offset > 0) mddev 1777 drivers/md/md.c mddev->bitmap_info.space = mddev 1778 drivers/md/md.c 8 - mddev->bitmap_info.offset; mddev 1780 drivers/md/md.c mddev->bitmap_info.space = mddev 1781 drivers/md/md.c -mddev->bitmap_info.offset; mddev 1785 drivers/md/md.c mddev->reshape_position = le64_to_cpu(sb->reshape_position); mddev 1786 drivers/md/md.c mddev->delta_disks = le32_to_cpu(sb->delta_disks); mddev 1787 drivers/md/md.c mddev->new_level = le32_to_cpu(sb->new_level); mddev 1788 drivers/md/md.c mddev->new_layout = le32_to_cpu(sb->new_layout); mddev 1789 drivers/md/md.c mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); mddev 1790 drivers/md/md.c if (mddev->delta_disks < 0 || mddev 1791 drivers/md/md.c (mddev->delta_disks == 0 && mddev 1794 drivers/md/md.c 
mddev->reshape_backwards = 1; mddev 1796 drivers/md/md.c mddev->reshape_position = MaxSector; mddev 1797 drivers/md/md.c mddev->delta_disks = 0; mddev 1798 drivers/md/md.c mddev->new_level = mddev->level; mddev 1799 drivers/md/md.c mddev->new_layout = mddev->layout; mddev 1800 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors; mddev 1803 drivers/md/md.c if (mddev->level == 0 && mddev 1805 drivers/md/md.c mddev->layout = -1; mddev 1808 drivers/md/md.c set_bit(MD_HAS_JOURNAL, &mddev->flags); mddev 1819 drivers/md/md.c set_bit(MD_HAS_PPL, &mddev->flags); mddev 1821 drivers/md/md.c } else if (mddev->pers == NULL) { mddev 1829 drivers/md/md.c if (ev1 < mddev->events) mddev 1831 drivers/md/md.c } else if (mddev->bitmap) { mddev 1835 drivers/md/md.c if (ev1 < mddev->bitmap->events_cleared) mddev 1837 drivers/md/md.c if (ev1 < mddev->events) mddev 1840 drivers/md/md.c if (ev1 < mddev->events) mddev 1844 drivers/md/md.c if (mddev->level != LEVEL_MULTIPATH) { mddev 1882 drivers/md/md.c &mddev->recovery)) mddev 1900 drivers/md/md.c static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) mddev 1914 drivers/md/md.c sb->utime = cpu_to_le64((__u64)mddev->utime); mddev 1915 drivers/md/md.c sb->events = cpu_to_le64(mddev->events); mddev 1916 drivers/md/md.c if (mddev->in_sync) mddev 1917 drivers/md/md.c sb->resync_offset = cpu_to_le64(mddev->recovery_cp); mddev 1918 drivers/md/md.c else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) mddev 1925 drivers/md/md.c sb->raid_disks = cpu_to_le32(mddev->raid_disks); mddev 1926 drivers/md/md.c sb->size = cpu_to_le64(mddev->dev_sectors); mddev 1927 drivers/md/md.c sb->chunksize = cpu_to_le32(mddev->chunk_sectors); mddev 1928 drivers/md/md.c sb->level = cpu_to_le32(mddev->level); mddev 1929 drivers/md/md.c sb->layout = cpu_to_le32(mddev->layout); mddev 1942 drivers/md/md.c if (mddev->bitmap && mddev->bitmap_info.file == NULL) { mddev 1943 drivers/md/md.c sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); mddev 1953 drivers/md/md.c if (rdev->saved_raid_disk >= 0 && mddev->bitmap) mddev 1964 drivers/md/md.c if (mddev->reshape_position != MaxSector) { mddev 1966 drivers/md/md.c sb->reshape_position = cpu_to_le64(mddev->reshape_position); mddev 1967 drivers/md/md.c sb->new_layout = cpu_to_le32(mddev->new_layout); mddev 1968 drivers/md/md.c sb->delta_disks = cpu_to_le32(mddev->delta_disks); mddev 1969 drivers/md/md.c sb->new_level = cpu_to_le32(mddev->new_level); mddev 1970 drivers/md/md.c sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); mddev 1971 drivers/md/md.c if (mddev->delta_disks == 0 && mddev 1972 drivers/md/md.c mddev->reshape_backwards) mddev 1983 drivers/md/md.c if (mddev_is_clustered(mddev)) mddev 1990 drivers/md/md.c md_error(mddev, rdev); mddev 2021 drivers/md/md.c rdev_for_each(rdev2, mddev) mddev 2038 drivers/md/md.c if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) mddev 2041 drivers/md/md.c if (test_bit(MD_HAS_PPL, &mddev->flags)) { mddev 2042 drivers/md/md.c if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) mddev 2051 drivers/md/md.c rdev_for_each(rdev2, mddev) { mddev 2073 drivers/md/md.c if (num_sectors && num_sectors < rdev->mddev->dev_sectors) mddev 2083 drivers/md/md.c } else if (rdev->mddev->bitmap_info.offset) { mddev 2101 drivers/md/md.c md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, mddev 2103 drivers/md/md.c } while (md_super_wait(rdev->mddev) < 0); mddev 2119 drivers/md/md.c if (rdev->mddev->minor_version == 0) mddev 2130 drivers/md/md.c bitmap = rdev->mddev->bitmap; mddev 2131 
drivers/md/md.c if (bitmap && !rdev->mddev->bitmap_info.file && mddev 2132 drivers/md/md.c rdev->sb_start + rdev->mddev->bitmap_info.offset + mddev 2162 drivers/md/md.c static void sync_super(struct mddev *mddev, struct md_rdev *rdev) mddev 2164 drivers/md/md.c if (mddev->sync_super) { mddev 2165 drivers/md/md.c mddev->sync_super(mddev, rdev); mddev 2169 drivers/md/md.c BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); mddev 2171 drivers/md/md.c super_types[mddev->major_version].sync_super(mddev, rdev); mddev 2174 drivers/md/md.c static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) mddev 2209 drivers/md/md.c int md_integrity_register(struct mddev *mddev) mddev 2213 drivers/md/md.c if (list_empty(&mddev->disks)) mddev 2215 drivers/md/md.c if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) mddev 2217 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 2239 drivers/md/md.c blk_integrity_register(mddev->gendisk, mddev 2242 drivers/md/md.c pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); mddev 2243 drivers/md/md.c if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE)) { mddev 2245 drivers/md/md.c mdname(mddev)); mddev 2256 drivers/md/md.c int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) mddev 2261 drivers/md/md.c if (!mddev->gendisk) mddev 2264 drivers/md/md.c bi_mddev = blk_get_integrity(mddev->gendisk); mddev 2269 drivers/md/md.c if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) { mddev 2271 drivers/md/md.c mdname(mddev), bdevname(rdev->bdev, name)); mddev 2279 drivers/md/md.c static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) mddev 2286 drivers/md/md.c if (find_rdev(mddev, rdev->bdev->bd_dev)) mddev 2290 drivers/md/md.c mddev->pers) mddev 2296 drivers/md/md.c (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { mddev 2297 drivers/md/md.c if (mddev->pers) { mddev 2302 drivers/md/md.c if (mddev->level > 0) mddev 2305 drivers/md/md.c mddev->dev_sectors = rdev->sectors; mddev 2315 drivers/md/md.c if (mddev->pers) mddev 2316 drivers/md/md.c choice = mddev->raid_disks; mddev 2317 drivers/md/md.c while (md_find_rdev_nr_rcu(mddev, choice)) mddev 2321 drivers/md/md.c if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { mddev 2328 drivers/md/md.c mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { mddev 2330 drivers/md/md.c mdname(mddev), mddev->max_disks); mddev 2336 drivers/md/md.c rdev->mddev = mddev; mddev 2339 drivers/md/md.c if (mddev->raid_disks) mddev 2340 drivers/md/md.c mddev_create_wb_pool(mddev, rdev, false); mddev 2342 drivers/md/md.c if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) mddev 2350 drivers/md/md.c list_add_rcu(&rdev->same_set, &mddev->disks); mddev 2351 drivers/md/md.c bd_link_disk_holder(rdev->bdev, mddev->gendisk); mddev 2354 drivers/md/md.c mddev->recovery_disabled++; mddev 2360 drivers/md/md.c b, mdname(mddev)); mddev 2375 drivers/md/md.c bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); mddev 2378 drivers/md/md.c mddev_destroy_wb_pool(rdev->mddev, rdev); mddev 2379 drivers/md/md.c rdev->mddev = NULL; mddev 2445 drivers/md/md.c static void export_array(struct mddev *mddev) mddev 2449 drivers/md/md.c while (!list_empty(&mddev->disks)) { mddev 2450 drivers/md/md.c rdev = list_first_entry(&mddev->disks, struct md_rdev, mddev 2454 drivers/md/md.c mddev->raid_disks = 0; mddev 2455 drivers/md/md.c mddev->major_version = 0; mddev 2458 drivers/md/md.c static bool set_in_sync(struct mddev *mddev) mddev 2460 drivers/md/md.c 
lockdep_assert_held(&mddev->lock); mddev 2461 drivers/md/md.c if (!mddev->in_sync) { mddev 2462 drivers/md/md.c mddev->sync_checkers++; mddev 2463 drivers/md/md.c spin_unlock(&mddev->lock); mddev 2464 drivers/md/md.c percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); mddev 2465 drivers/md/md.c spin_lock(&mddev->lock); mddev 2466 drivers/md/md.c if (!mddev->in_sync && mddev 2467 drivers/md/md.c percpu_ref_is_zero(&mddev->writes_pending)) { mddev 2468 drivers/md/md.c mddev->in_sync = 1; mddev 2474 drivers/md/md.c set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); mddev 2475 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state); mddev 2477 drivers/md/md.c if (--mddev->sync_checkers == 0) mddev 2478 drivers/md/md.c percpu_ref_switch_to_percpu(&mddev->writes_pending); mddev 2480 drivers/md/md.c if (mddev->safemode == 1) mddev 2481 drivers/md/md.c mddev->safemode = 0; mddev 2482 drivers/md/md.c return mddev->in_sync; mddev 2485 drivers/md/md.c static void sync_sbs(struct mddev *mddev, int nospares) mddev 2494 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 2495 drivers/md/md.c if (rdev->sb_events == mddev->events || mddev 2498 drivers/md/md.c rdev->sb_events+1 == mddev->events)) { mddev 2502 drivers/md/md.c sync_super(mddev, rdev); mddev 2508 drivers/md/md.c static bool does_sb_need_changing(struct mddev *mddev) mddev 2515 drivers/md/md.c rdev_for_each(rdev, mddev) mddev 2525 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 2537 drivers/md/md.c if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || mddev 2538 drivers/md/md.c (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || mddev 2539 drivers/md/md.c (mddev->layout != le32_to_cpu(sb->layout)) || mddev 2540 drivers/md/md.c (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || mddev 2541 drivers/md/md.c (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) mddev 2547 drivers/md/md.c void md_update_sb(struct mddev *mddev, int force_change) mddev 2555 drivers/md/md.c if (mddev->ro) { mddev 2557 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 2562 drivers/md/md.c if (mddev_is_clustered(mddev)) { mddev 2563 drivers/md/md.c if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) mddev 2565 drivers/md/md.c if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) mddev 2567 drivers/md/md.c ret = md_cluster_ops->metadata_update_start(mddev); mddev 2569 drivers/md/md.c if (!does_sb_need_changing(mddev)) { mddev 2571 drivers/md/md.c md_cluster_ops->metadata_update_cancel(mddev); mddev 2572 drivers/md/md.c bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), mddev 2585 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 2587 drivers/md/md.c mddev->delta_disks >= 0 && mddev 2588 drivers/md/md.c test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && mddev 2589 drivers/md/md.c test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && mddev 2590 drivers/md/md.c !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 2593 drivers/md/md.c mddev->curr_resync_completed > rdev->recovery_offset) mddev 2594 drivers/md/md.c rdev->recovery_offset = mddev->curr_resync_completed; mddev 2597 drivers/md/md.c if (!mddev->persistent) { mddev 2598 drivers/md/md.c clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); mddev 2599 drivers/md/md.c clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 2600 drivers/md/md.c if (!mddev->external) { mddev 2601 drivers/md/md.c clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); mddev 2602 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 2606 drivers/md/md.c md_error(mddev, rdev); mddev 
2613 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 2617 drivers/md/md.c spin_lock(&mddev->lock); mddev 2619 drivers/md/md.c mddev->utime = ktime_get_real_seconds(); mddev 2621 drivers/md/md.c if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) mddev 2623 drivers/md/md.c if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) mddev 2631 drivers/md/md.c if (mddev->degraded) mddev 2643 drivers/md/md.c sync_req = mddev->in_sync; mddev 2648 drivers/md/md.c && (mddev->in_sync && mddev->recovery_cp == MaxSector) mddev 2649 drivers/md/md.c && mddev->can_decrease_events mddev 2650 drivers/md/md.c && mddev->events != 1) { mddev 2651 drivers/md/md.c mddev->events--; mddev 2652 drivers/md/md.c mddev->can_decrease_events = 0; mddev 2655 drivers/md/md.c mddev->events ++; mddev 2656 drivers/md/md.c mddev->can_decrease_events = nospares; mddev 2664 drivers/md/md.c WARN_ON(mddev->events == 0); mddev 2666 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 2673 drivers/md/md.c sync_sbs(mddev, nospares); mddev 2674 drivers/md/md.c spin_unlock(&mddev->lock); mddev 2677 drivers/md/md.c mdname(mddev), mddev->in_sync); mddev 2679 drivers/md/md.c if (mddev->queue) mddev 2680 drivers/md/md.c blk_add_trace_msg(mddev->queue, "md md_update_sb"); mddev 2682 drivers/md/md.c md_bitmap_update_sb(mddev->bitmap); mddev 2683 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 2690 drivers/md/md.c md_super_write(mddev,rdev, mddev 2696 drivers/md/md.c rdev->sb_events = mddev->events; mddev 2698 drivers/md/md.c md_super_write(mddev, rdev, mddev 2709 drivers/md/md.c if (mddev->level == LEVEL_MULTIPATH) mddev 2713 drivers/md/md.c if (md_super_wait(mddev) < 0) mddev 2717 drivers/md/md.c if (mddev_is_clustered(mddev) && ret == 0) mddev 2718 drivers/md/md.c md_cluster_ops->metadata_update_finish(mddev); mddev 2720 drivers/md/md.c if (mddev->in_sync != sync_req || mddev 2721 drivers/md/md.c !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), mddev 2725 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 2726 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) mddev 2727 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "sync_completed"); mddev 2729 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 2743 drivers/md/md.c struct mddev *mddev = rdev->mddev; mddev 2747 drivers/md/md.c if (!mddev->pers->hot_remove_disk || add_journal) { mddev 2752 drivers/md/md.c super_types[mddev->major_version]. 
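The two entries surrounding this point quote one call split across md.c source lines 2752-2753: superblock validation dispatched through the super_types table indexed by mddev->major_version (the function pointers themselves appear in the md.c 1072-1074 entries, and the sync_super wrapper at 2162-2171 uses the same table). A simplified, self-contained model of that dispatch shape, with stub handlers and shortened types invented for the sketch:

    /* Simplified model of md's super_types[] dispatch, inferred from the
     * entries above; the two stub handlers and the trimmed-down structs
     * are stand-ins, not the kernel's implementations. */
    #include <stdio.h>

    struct mddev { int major_version; };
    struct md_rdev { int unused; };

    struct super_type {
        const char *name;
        int  (*validate_super)(struct mddev *mddev, struct md_rdev *rdev);
        void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
    };

    static int v090_validate(struct mddev *m, struct md_rdev *r) { return 0; }
    static void v090_sync(struct mddev *m, struct md_rdev *r) { }
    static int v1_validate(struct mddev *m, struct md_rdev *r) { return 0; }
    static void v1_sync(struct mddev *m, struct md_rdev *r) { }

    static struct super_type super_types[] = {
        [0] = { "0.90.0", v090_validate, v090_sync },
        [1] = { "md-1",   v1_validate,   v1_sync   },
    };

    int main(void)
    {
        struct mddev md = { .major_version = 1 };
        struct md_rdev rdev = { 0 };

        /* the same shape as the md.c:2752-2753 call quoted around this point */
        super_types[md.major_version].validate_super(&md, &rdev);
        printf("validated against %s metadata\n",
               super_types[md.major_version].name);
        return 0;
    }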
mddev 2753 drivers/md/md.c validate_super(mddev, rdev); mddev 2755 drivers/md/md.c mddev_suspend(mddev); mddev 2756 drivers/md/md.c err = mddev->pers->hot_add_disk(mddev, rdev); mddev 2758 drivers/md/md.c mddev_resume(mddev); mddev 2766 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 2767 drivers/md/md.c if (mddev->degraded) mddev 2768 drivers/md/md.c set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); mddev 2769 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 2770 drivers/md/md.c md_new_event(mddev); mddev 2771 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 2861 drivers/md/md.c if (cmd_match(buf, "faulty") && rdev->mddev->pers) { mddev 2862 drivers/md/md.c md_error(rdev->mddev, rdev); mddev 2868 drivers/md/md.c if (rdev->mddev->pers) { mddev 2870 drivers/md/md.c remove_and_add_spares(rdev->mddev, rdev); mddev 2875 drivers/md/md.c struct mddev *mddev = rdev->mddev; mddev 2877 drivers/md/md.c if (mddev_is_clustered(mddev)) mddev 2878 drivers/md/md.c err = md_cluster_ops->remove_disk(mddev, rdev); mddev 2882 drivers/md/md.c if (mddev->pers) { mddev 2883 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 2884 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 2886 drivers/md/md.c md_new_event(mddev); mddev 2891 drivers/md/md.c mddev_create_wb_pool(rdev->mddev, rdev, false); mddev 2894 drivers/md/md.c mddev_destroy_wb_pool(rdev->mddev, rdev); mddev 2907 drivers/md/md.c md_error(rdev->mddev, rdev); mddev 2912 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); mddev 2913 drivers/md/md.c md_wakeup_thread(rdev->mddev->thread); mddev 2927 drivers/md/md.c if (rdev->mddev->pers == NULL) { mddev 2948 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); mddev 2949 drivers/md/md.c md_wakeup_thread(rdev->mddev->thread); mddev 2962 drivers/md/md.c if (rdev->mddev->pers) mddev 2970 drivers/md/md.c if (rdev->mddev->pers) mddev 2977 drivers/md/md.c if (!rdev->mddev->pers) mddev 2987 drivers/md/md.c if (!mddev_is_clustered(rdev->mddev) || mddev 2994 drivers/md/md.c } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { mddev 2998 drivers/md/md.c } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { mddev 3056 drivers/md/md.c if (rdev->mddev->pers && slot == -1) { mddev 3067 drivers/md/md.c if (rdev->mddev->pers->hot_remove_disk == NULL) mddev 3070 drivers/md/md.c remove_and_add_spares(rdev->mddev, rdev); mddev 3073 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); mddev 3074 drivers/md/md.c md_wakeup_thread(rdev->mddev->thread); mddev 3075 drivers/md/md.c } else if (rdev->mddev->pers) { mddev 3084 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) mddev 3087 drivers/md/md.c if (rdev->mddev->pers->hot_add_disk == NULL) mddev 3090 drivers/md/md.c if (slot >= rdev->mddev->raid_disks && mddev 3091 drivers/md/md.c slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) mddev 3101 drivers/md/md.c err = rdev->mddev->pers-> mddev 3102 drivers/md/md.c hot_add_disk(rdev->mddev, rdev); mddev 3108 drivers/md/md.c if (sysfs_link_rdev(rdev->mddev, rdev)) mddev 3112 drivers/md/md.c if (slot >= rdev->mddev->raid_disks && mddev 3113 drivers/md/md.c slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) mddev 3140 drivers/md/md.c if (rdev->mddev->pers && rdev->raid_disk >= 0) mddev 3142 drivers/md/md.c if (rdev->sectors && rdev->mddev->external) mddev 3164 drivers/md/md.c struct mddev *mddev = rdev->mddev; mddev 3169 drivers/md/md.c if 
(mddev->sync_thread || mddev 3170 drivers/md/md.c test_bit(MD_RECOVERY_RUNNING,&mddev->recovery)) mddev 3178 drivers/md/md.c + mddev->dev_sectors > rdev->sectors) mddev 3187 drivers/md/md.c mddev->reshape_backwards) mddev 3194 drivers/md/md.c !mddev->reshape_backwards) mddev 3197 drivers/md/md.c if (mddev->pers && mddev->persistent && mddev 3198 drivers/md/md.c !super_types[mddev->major_version] mddev 3203 drivers/md/md.c mddev->reshape_backwards = 1; mddev 3205 drivers/md/md.c mddev->reshape_backwards = 0; mddev 3250 drivers/md/md.c struct mddev *my_mddev = rdev->mddev; mddev 3284 drivers/md/md.c struct mddev *mddev; mddev 3289 drivers/md/md.c for_each_mddev(mddev, tmp) { mddev 3292 drivers/md/md.c rdev_for_each(rdev2, mddev) mddev 3302 drivers/md/md.c mddev_put(mddev); mddev 3344 drivers/md/md.c if (rdev->mddev->pers && mddev 3412 drivers/md/md.c if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && mddev 3416 drivers/md/md.c if (rdev->mddev->persistent) { mddev 3417 drivers/md/md.c if (rdev->mddev->major_version == 0) mddev 3425 drivers/md/md.c } else if (!rdev->mddev->external) { mddev 3449 drivers/md/md.c if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && mddev 3453 drivers/md/md.c if (rdev->mddev->persistent) { mddev 3454 drivers/md/md.c if (rdev->mddev->major_version == 0) mddev 3458 drivers/md/md.c } else if (!rdev->mddev->external) { mddev 3490 drivers/md/md.c if (!rdev->mddev) mddev 3502 drivers/md/md.c struct mddev *mddev = rdev->mddev; mddev 3508 drivers/md/md.c rv = mddev ? mddev_lock(mddev) : -ENODEV; mddev 3510 drivers/md/md.c if (rdev->mddev == NULL) mddev 3514 drivers/md/md.c mddev_unlock(mddev); mddev 3632 drivers/md/md.c static int analyze_sbs(struct mddev *mddev) mddev 3639 drivers/md/md.c rdev_for_each_safe(rdev, tmp, mddev) mddev 3640 drivers/md/md.c switch (super_types[mddev->major_version]. mddev 3641 drivers/md/md.c load_super(rdev, freshest, mddev->minor_version)) { mddev 3659 drivers/md/md.c super_types[mddev->major_version]. mddev 3660 drivers/md/md.c validate_super(mddev, freshest); mddev 3663 drivers/md/md.c rdev_for_each_safe(rdev, tmp, mddev) { mddev 3664 drivers/md/md.c if (mddev->max_disks && mddev 3665 drivers/md/md.c (rdev->desc_nr >= mddev->max_disks || mddev 3666 drivers/md/md.c i > mddev->max_disks)) { mddev 3668 drivers/md/md.c mdname(mddev), bdevname(rdev->bdev, b), mddev 3669 drivers/md/md.c mddev->max_disks); mddev 3674 drivers/md/md.c if (super_types[mddev->major_version]. 
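The analyze_sbs() entries above (md.c 3632 onward, with the validate_super call completing in the next entry) show a two-pass scheme: load every member's superblock while tracking the freshest one, validate the array against it, then re-validate each member and kick out any that fail. A toy rendering of that shape, assuming event counters decide freshness and using a plain array in place of the kernel's rdev list walked with rdev_for_each_safe():

    /* Sketch of the analyze_sbs() shape visible in the md.c entries above:
     * pick the device with the freshest superblock, then drop stale members.
     * Devices, event counts, and the eviction flag are invented test data. */
    #include <stdio.h>

    struct rdev { const char *name; long long events; int in_array; };

    int main(void)
    {
        struct rdev devs[] = {
            { "sda1", 102, 1 }, { "sdb1", 102, 1 }, { "sdc1", 97, 1 },
        };
        int n = 3, i, freshest = 0;

        /* pass 1: find the freshest superblock */
        for (i = 1; i < n; i++)
            if (devs[i].events > devs[freshest].events)
                freshest = i;

        /* pass 2: re-validate everyone against it; stale members get dropped */
        for (i = 0; i < n; i++)
            if (devs[i].events < devs[freshest].events) {
                devs[i].in_array = 0;   /* kicked from the array */
                printf("kicking stale %s\n", devs[i].name);
            }

        printf("freshest superblock: %s\n", devs[freshest].name);
        return 0;
    }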
mddev 3675 drivers/md/md.c validate_super(mddev, rdev)) { mddev 3682 drivers/md/md.c if (mddev->level == LEVEL_MULTIPATH) { mddev 3687 drivers/md/md.c (mddev->raid_disks - min(0, mddev->delta_disks)) && mddev 3734 drivers/md/md.c safe_delay_show(struct mddev *mddev, char *page) mddev 3736 drivers/md/md.c int msec = (mddev->safemode_delay*1000)/HZ; mddev 3740 drivers/md/md.c safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) mddev 3744 drivers/md/md.c if (mddev_is_clustered(mddev)) { mddev 3752 drivers/md/md.c mddev->safemode_delay = 0; mddev 3754 drivers/md/md.c unsigned long old_delay = mddev->safemode_delay; mddev 3759 drivers/md/md.c mddev->safemode_delay = new_delay; mddev 3761 drivers/md/md.c mod_timer(&mddev->safemode_timer, jiffies+1); mddev 3769 drivers/md/md.c level_show(struct mddev *mddev, char *page) mddev 3773 drivers/md/md.c spin_lock(&mddev->lock); mddev 3774 drivers/md/md.c p = mddev->pers; mddev 3777 drivers/md/md.c else if (mddev->clevel[0]) mddev 3778 drivers/md/md.c ret = sprintf(page, "%s\n", mddev->clevel); mddev 3779 drivers/md/md.c else if (mddev->level != LEVEL_NONE) mddev 3780 drivers/md/md.c ret = sprintf(page, "%d\n", mddev->level); mddev 3783 drivers/md/md.c spin_unlock(&mddev->lock); mddev 3788 drivers/md/md.c level_store(struct mddev *mddev, const char *buf, size_t len) mddev 3801 drivers/md/md.c rv = mddev_lock(mddev); mddev 3805 drivers/md/md.c if (mddev->pers == NULL) { mddev 3806 drivers/md/md.c strncpy(mddev->clevel, buf, slen); mddev 3807 drivers/md/md.c if (mddev->clevel[slen-1] == '\n') mddev 3809 drivers/md/md.c mddev->clevel[slen] = 0; mddev 3810 drivers/md/md.c mddev->level = LEVEL_NONE; mddev 3815 drivers/md/md.c if (mddev->ro) mddev 3825 drivers/md/md.c if (mddev->sync_thread || mddev 3826 drivers/md/md.c test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || mddev 3827 drivers/md/md.c mddev->reshape_position != MaxSector || mddev 3828 drivers/md/md.c mddev->sysfs_active) mddev 3832 drivers/md/md.c if (!mddev->pers->quiesce) { mddev 3834 drivers/md/md.c mdname(mddev), mddev->pers->name); mddev 3858 drivers/md/md.c if (pers == mddev->pers) { mddev 3867 drivers/md/md.c mdname(mddev), clevel); mddev 3872 drivers/md/md.c rdev_for_each(rdev, mddev) mddev 3878 drivers/md/md.c priv = pers->takeover(mddev); mddev 3880 drivers/md/md.c mddev->new_level = mddev->level; mddev 3881 drivers/md/md.c mddev->new_layout = mddev->layout; mddev 3882 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors; mddev 3883 drivers/md/md.c mddev->raid_disks -= mddev->delta_disks; mddev 3884 drivers/md/md.c mddev->delta_disks = 0; mddev 3885 drivers/md/md.c mddev->reshape_backwards = 0; mddev 3888 drivers/md/md.c mdname(mddev), clevel); mddev 3894 drivers/md/md.c mddev_suspend(mddev); mddev 3895 drivers/md/md.c mddev_detach(mddev); mddev 3897 drivers/md/md.c spin_lock(&mddev->lock); mddev 3898 drivers/md/md.c oldpers = mddev->pers; mddev 3899 drivers/md/md.c oldpriv = mddev->private; mddev 3900 drivers/md/md.c mddev->pers = pers; mddev 3901 drivers/md/md.c mddev->private = priv; mddev 3902 drivers/md/md.c strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); mddev 3903 drivers/md/md.c mddev->level = mddev->new_level; mddev 3904 drivers/md/md.c mddev->layout = mddev->new_layout; mddev 3905 drivers/md/md.c mddev->chunk_sectors = mddev->new_chunk_sectors; mddev 3906 drivers/md/md.c mddev->delta_disks = 0; mddev 3907 drivers/md/md.c mddev->reshape_backwards = 0; mddev 3908 drivers/md/md.c mddev->degraded = 0; mddev 3909 drivers/md/md.c 
spin_unlock(&mddev->lock); mddev 3912 drivers/md/md.c mddev->external) { mddev 3920 drivers/md/md.c mddev->in_sync = 0; mddev 3921 drivers/md/md.c mddev->safemode_delay = 0; mddev 3922 drivers/md/md.c mddev->safemode = 0; mddev 3925 drivers/md/md.c oldpers->free(mddev, oldpriv); mddev 3930 drivers/md/md.c if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) mddev 3932 drivers/md/md.c mdname(mddev)); mddev 3933 drivers/md/md.c mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); mddev 3938 drivers/md/md.c if (mddev->to_remove == NULL) mddev 3939 drivers/md/md.c mddev->to_remove = &md_redundancy_group; mddev 3944 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 3947 drivers/md/md.c if (rdev->new_raid_disk >= mddev->raid_disks) mddev 3951 drivers/md/md.c sysfs_unlink_rdev(mddev, rdev); mddev 3953 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 3962 drivers/md/md.c if (sysfs_link_rdev(mddev, rdev)) mddev 3964 drivers/md/md.c rdev->raid_disk, mdname(mddev)); mddev 3972 drivers/md/md.c mddev->in_sync = 1; mddev 3973 drivers/md/md.c del_timer_sync(&mddev->safemode_timer); mddev 3975 drivers/md/md.c blk_set_stacking_limits(&mddev->queue->limits); mddev 3976 drivers/md/md.c pers->run(mddev); mddev 3977 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 3978 drivers/md/md.c mddev_resume(mddev); mddev 3979 drivers/md/md.c if (!mddev->thread) mddev 3980 drivers/md/md.c md_update_sb(mddev, 1); mddev 3981 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "level"); mddev 3982 drivers/md/md.c md_new_event(mddev); mddev 3985 drivers/md/md.c mddev_unlock(mddev); mddev 3993 drivers/md/md.c layout_show(struct mddev *mddev, char *page) mddev 3996 drivers/md/md.c if (mddev->reshape_position != MaxSector && mddev 3997 drivers/md/md.c mddev->layout != mddev->new_layout) mddev 3999 drivers/md/md.c mddev->new_layout, mddev->layout); mddev 4000 drivers/md/md.c return sprintf(page, "%d\n", mddev->layout); mddev 4004 drivers/md/md.c layout_store(struct mddev *mddev, const char *buf, size_t len) mddev 4012 drivers/md/md.c err = mddev_lock(mddev); mddev 4016 drivers/md/md.c if (mddev->pers) { mddev 4017 drivers/md/md.c if (mddev->pers->check_reshape == NULL) mddev 4019 drivers/md/md.c else if (mddev->ro) mddev 4022 drivers/md/md.c mddev->new_layout = n; mddev 4023 drivers/md/md.c err = mddev->pers->check_reshape(mddev); mddev 4025 drivers/md/md.c mddev->new_layout = mddev->layout; mddev 4028 drivers/md/md.c mddev->new_layout = n; mddev 4029 drivers/md/md.c if (mddev->reshape_position == MaxSector) mddev 4030 drivers/md/md.c mddev->layout = n; mddev 4032 drivers/md/md.c mddev_unlock(mddev); mddev 4039 drivers/md/md.c raid_disks_show(struct mddev *mddev, char *page) mddev 4041 drivers/md/md.c if (mddev->raid_disks == 0) mddev 4043 drivers/md/md.c if (mddev->reshape_position != MaxSector && mddev 4044 drivers/md/md.c mddev->delta_disks != 0) mddev 4045 drivers/md/md.c return sprintf(page, "%d (%d)\n", mddev->raid_disks, mddev 4046 drivers/md/md.c mddev->raid_disks - mddev->delta_disks); mddev 4047 drivers/md/md.c return sprintf(page, "%d\n", mddev->raid_disks); mddev 4050 drivers/md/md.c static int update_raid_disks(struct mddev *mddev, int raid_disks); mddev 4053 drivers/md/md.c raid_disks_store(struct mddev *mddev, const char *buf, size_t len) mddev 4062 drivers/md/md.c err = mddev_lock(mddev); mddev 4065 drivers/md/md.c if (mddev->pers) mddev 4066 drivers/md/md.c err = update_raid_disks(mddev, n); mddev 4067 drivers/md/md.c else if (mddev->reshape_position != MaxSector) { 
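The level_store() entries above (md.c 3894-3978) follow a strict live-reconversion order: suspend I/O, detach the old personality, swap pers/private and the geometry fields under mddev->lock, then resume and wake the recovery machinery. A userspace sketch of just that ordering, using pthread primitives and invented types in place of the kernel's mddev_suspend()/mddev_resume() and spinlock:

    /* Sketch of the quiesce -> detach -> swap-under-lock -> resume ordering
     * shown by the level_store() entries above. Types, names, and the
     * pthread substitutes are illustrative, not the kernel implementation. */
    #include <pthread.h>
    #include <stdio.h>

    struct personality { const char *name; };

    struct array {
        pthread_mutex_t lock;
        int suspended;
        struct personality *pers;
        void *priv;
    };

    static void swap_personality(struct array *a, struct personality *np,
                                 void *npriv)
    {
        a->suspended = 1;              /* mddev_suspend(): drain active I/O */
        /* ... mddev_detach(): stop the old personality's background work ... */

        pthread_mutex_lock(&a->lock);  /* spin_lock(&mddev->lock) */
        struct personality *old = a->pers;
        a->pers = np;
        a->priv = npriv;
        pthread_mutex_unlock(&a->lock);

        a->suspended = 0;              /* mddev_resume(): restart I/O, kick
                                        * recovery */
        printf("switched %s -> %s\n", old->name, np->name);
    }

    int main(void)
    {
        struct personality r5 = { "raid5" }, r6 = { "raid6" };
        struct array a = { PTHREAD_MUTEX_INITIALIZER, 0, &r5, NULL };

        swap_personality(&a, &r6, NULL);
        return 0;
    }

The key design point the listing makes visible: the expensive quiescing happens outside the spinlock, and only the pointer swap itself is done under it, so in-flight readers never observe a half-switched personality.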
mddev 4069 drivers/md/md.c int olddisks = mddev->raid_disks - mddev->delta_disks;
mddev 4072 drivers/md/md.c rdev_for_each(rdev, mddev) {
mddev 4081 drivers/md/md.c mddev->delta_disks = n - olddisks;
mddev 4082 drivers/md/md.c mddev->raid_disks = n;
mddev 4083 drivers/md/md.c mddev->reshape_backwards = (mddev->delta_disks < 0);
mddev 4085 drivers/md/md.c mddev->raid_disks = n;
mddev 4087 drivers/md/md.c mddev_unlock(mddev);
mddev 4094 drivers/md/md.c chunk_size_show(struct mddev *mddev, char *page)
mddev 4096 drivers/md/md.c if (mddev->reshape_position != MaxSector &&
mddev 4097 drivers/md/md.c mddev->chunk_sectors != mddev->new_chunk_sectors)
mddev 4099 drivers/md/md.c mddev->new_chunk_sectors << 9,
mddev 4100 drivers/md/md.c mddev->chunk_sectors << 9);
mddev 4101 drivers/md/md.c return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
mddev 4105 drivers/md/md.c chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4114 drivers/md/md.c err = mddev_lock(mddev);
mddev 4117 drivers/md/md.c if (mddev->pers) {
mddev 4118 drivers/md/md.c if (mddev->pers->check_reshape == NULL)
mddev 4120 drivers/md/md.c else if (mddev->ro)
mddev 4123 drivers/md/md.c mddev->new_chunk_sectors = n >> 9;
mddev 4124 drivers/md/md.c err = mddev->pers->check_reshape(mddev);
mddev 4126 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev 4129 drivers/md/md.c mddev->new_chunk_sectors = n >> 9;
mddev 4130 drivers/md/md.c if (mddev->reshape_position == MaxSector)
mddev 4131 drivers/md/md.c mddev->chunk_sectors = n >> 9;
mddev 4133 drivers/md/md.c mddev_unlock(mddev);
mddev 4140 drivers/md/md.c resync_start_show(struct mddev *mddev, char *page)
mddev 4142 drivers/md/md.c if (mddev->recovery_cp == MaxSector)
mddev 4144 drivers/md/md.c return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
mddev 4148 drivers/md/md.c resync_start_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4163 drivers/md/md.c err = mddev_lock(mddev);
mddev 4166 drivers/md/md.c if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
mddev 4170 drivers/md/md.c mddev->recovery_cp = n;
mddev 4171 drivers/md/md.c if (mddev->pers)
mddev 4172 drivers/md/md.c set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
mddev 4174 drivers/md/md.c mddev_unlock(mddev);
mddev 4238 drivers/md/md.c array_state_show(struct mddev *mddev, char *page)
mddev 4242 drivers/md/md.c if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
mddev 4243 drivers/md/md.c switch(mddev->ro) {
mddev 4251 drivers/md/md.c spin_lock(&mddev->lock);
mddev 4252 drivers/md/md.c if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
mddev 4254 drivers/md/md.c else if (mddev->in_sync)
mddev 4256 drivers/md/md.c else if (mddev->safemode)
mddev 4260 drivers/md/md.c spin_unlock(&mddev->lock);
mddev 4263 drivers/md/md.c if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
mddev 4266 drivers/md/md.c if (list_empty(&mddev->disks) &&
mddev 4267 drivers/md/md.c mddev->raid_disks == 0 &&
mddev 4268 drivers/md/md.c mddev->dev_sectors == 0)
mddev 4276 drivers/md/md.c static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
mddev 4277 drivers/md/md.c static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
mddev 4278 drivers/md/md.c static int do_md_run(struct mddev *mddev);
mddev 4279 drivers/md/md.c static int restart_array(struct mddev *mddev);
mddev 4282 drivers/md/md.c array_state_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4287 drivers/md/md.c if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
mddev 4477 drivers/md/md.c err = bind_rdev_to_array(rdev, mddev);
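Note: array_state_store() turns the string written to the sysfs array_state file into an enum value before the big switch seen above. A toy userspace re-creation of that word-matching step follows; the word table here is an illustrative reconstruction, not the authoritative enum from md.c.

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical word table in the spirit of md.c's array_state names. */
    static const char * const array_states[] = {
        "clear", "inactive", "suspended", "readonly", "read-auto",
        "clean", "active", "write-pending", "active-idle",
    };

    static int match_state(const char *buf)
    {
        size_t n = strcspn(buf, "\n");   /* sysfs writes usually end in '\n' */
        size_t i;

        for (i = 0; i < sizeof(array_states) / sizeof(array_states[0]); i++)
            if (strlen(array_states[i]) == n &&
                strncmp(buf, array_states[i], n) == 0)
                return (int)i;
        return -1;                       /* unrecognized word */
    }

    int main(void)
    {
        printf("clean -> %d\n", match_state("clean\n"));  /* index 5 */
        printf("bogus -> %d\n", match_state("bogus"));    /* -1 */
        return 0;
    }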
mddev 4481 drivers/md/md.c mddev_unlock(mddev);
mddev 4483 drivers/md/md.c md_new_event(mddev);
mddev 4491 drivers/md/md.c bitmap_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4497 drivers/md/md.c err = mddev_lock(mddev);
mddev 4500 drivers/md/md.c if (!mddev->bitmap)
mddev 4512 drivers/md/md.c md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
mddev 4515 drivers/md/md.c md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
mddev 4517 drivers/md/md.c mddev_unlock(mddev);
mddev 4525 drivers/md/md.c size_show(struct mddev *mddev, char *page)
mddev 4528 drivers/md/md.c (unsigned long long)mddev->dev_sectors / 2);
mddev 4531 drivers/md/md.c static int update_size(struct mddev *mddev, sector_t num_sectors);
mddev 4534 drivers/md/md.c size_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4545 drivers/md/md.c err = mddev_lock(mddev);
mddev 4548 drivers/md/md.c if (mddev->pers) {
mddev 4549 drivers/md/md.c err = update_size(mddev, sectors);
mddev 4551 drivers/md/md.c md_update_sb(mddev, 1);
mddev 4553 drivers/md/md.c if (mddev->dev_sectors == 0 ||
mddev 4554 drivers/md/md.c mddev->dev_sectors > sectors)
mddev 4555 drivers/md/md.c mddev->dev_sectors = sectors;
mddev 4559 drivers/md/md.c mddev_unlock(mddev);
mddev 4573 drivers/md/md.c metadata_show(struct mddev *mddev, char *page)
mddev 4575 drivers/md/md.c if (mddev->persistent)
mddev 4577 drivers/md/md.c mddev->major_version, mddev->minor_version);
mddev 4578 drivers/md/md.c else if (mddev->external)
mddev 4579 drivers/md/md.c return sprintf(page, "external:%s\n", mddev->metadata_type);
mddev 4585 drivers/md/md.c metadata_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4595 drivers/md/md.c err = mddev_lock(mddev);
mddev 4599 drivers/md/md.c if (mddev->external && strncmp(buf, "external:", 9) == 0)
mddev 4601 drivers/md/md.c else if (!list_empty(&mddev->disks))
mddev 4606 drivers/md/md.c mddev->persistent = 0;
mddev 4607 drivers/md/md.c mddev->external = 0;
mddev 4608 drivers/md/md.c mddev->major_version = 0;
mddev 4609 drivers/md/md.c mddev->minor_version = 90;
mddev 4614 drivers/md/md.c if (namelen >= sizeof(mddev->metadata_type))
mddev 4615 drivers/md/md.c namelen = sizeof(mddev->metadata_type)-1;
mddev 4616 drivers/md/md.c strncpy(mddev->metadata_type, buf+9, namelen);
mddev 4617 drivers/md/md.c mddev->metadata_type[namelen] = 0;
mddev 4618 drivers/md/md.c if (namelen && mddev->metadata_type[namelen-1] == '\n')
mddev 4619 drivers/md/md.c mddev->metadata_type[--namelen] = 0;
mddev 4620 drivers/md/md.c mddev->persistent = 0;
mddev 4621 drivers/md/md.c mddev->external = 1;
mddev 4622 drivers/md/md.c mddev->major_version = 0;
mddev 4623 drivers/md/md.c mddev->minor_version = 90;
mddev 4637 drivers/md/md.c mddev->major_version = major;
mddev 4638 drivers/md/md.c mddev->minor_version = minor;
mddev 4639 drivers/md/md.c mddev->persistent = 1;
mddev 4640 drivers/md/md.c mddev->external = 0;
mddev 4643 drivers/md/md.c mddev_unlock(mddev);
mddev 4651 drivers/md/md.c action_show(struct mddev *mddev, char *page)
mddev 4654 drivers/md/md.c unsigned long recovery = mddev->recovery;
mddev 4658 drivers/md/md.c (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
mddev 4670 drivers/md/md.c else if (mddev->reshape_position != MaxSector)
mddev 4677 drivers/md/md.c action_store(struct mddev *mddev, const char *page, size_t len)
mddev 4679 drivers/md/md.c if (!mddev->pers || !mddev->pers->sync_request)
mddev 4685 drivers/md/md.c set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 4687 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 4688 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
mddev 4689 drivers/md/md.c mddev_lock(mddev) == 0) {
mddev 4691 drivers/md/md.c if (mddev->sync_thread) {
mddev 4692 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery);
mddev 4693 drivers/md/md.c md_reap_sync_thread(mddev);
mddev 4695 drivers/md/md.c mddev_unlock(mddev);
mddev 4697 drivers/md/md.c } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
mddev 4700 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 4702 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 4703 drivers/md/md.c set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
mddev 4706 drivers/md/md.c if (mddev->pers->start_reshape == NULL)
mddev 4708 drivers/md/md.c err = mddev_lock(mddev);
mddev 4710 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
mddev 4713 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 4714 drivers/md/md.c err = mddev->pers->start_reshape(mddev);
mddev 4716 drivers/md/md.c mddev_unlock(mddev);
mddev 4720 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "degraded");
mddev 4723 drivers/md/md.c set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
mddev 4726 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 4727 drivers/md/md.c set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev 4728 drivers/md/md.c set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
mddev 4730 drivers/md/md.c if (mddev->ro == 2) {
mddev 4734 drivers/md/md.c mddev->ro = 0;
mddev 4735 drivers/md/md.c md_wakeup_thread(mddev->sync_thread);
mddev 4737 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 4738 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 4739 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_action);
mddev 4747 drivers/md/md.c last_sync_action_show(struct mddev *mddev, char *page)
mddev 4749 drivers/md/md.c return sprintf(page, "%s\n", mddev->last_sync_action);
mddev 4755 drivers/md/md.c mismatch_cnt_show(struct mddev *mddev, char *page)
mddev 4759 drivers/md/md.c atomic64_read(&mddev->resync_mismatches));
mddev 4765 drivers/md/md.c sync_min_show(struct mddev *mddev, char *page)
mddev 4767 drivers/md/md.c return sprintf(page, "%d (%s)\n", speed_min(mddev),
mddev 4768 drivers/md/md.c mddev->sync_speed_min ? "local": "system");
mddev 4772 drivers/md/md.c sync_min_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4786 drivers/md/md.c mddev->sync_speed_min = min;
mddev 4794 drivers/md/md.c sync_max_show(struct mddev *mddev, char *page)
mddev 4796 drivers/md/md.c return sprintf(page, "%d (%s)\n", speed_max(mddev),
mddev 4797 drivers/md/md.c mddev->sync_speed_max ? "local": "system");
mddev 4801 drivers/md/md.c sync_max_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4815 drivers/md/md.c mddev->sync_speed_max = max;
mddev 4823 drivers/md/md.c degraded_show(struct mddev *mddev, char *page)
mddev 4825 drivers/md/md.c return sprintf(page, "%d\n", mddev->degraded);
mddev 4830 drivers/md/md.c sync_force_parallel_show(struct mddev *mddev, char *page)
mddev 4832 drivers/md/md.c return sprintf(page, "%d\n", mddev->parallel_resync);
mddev 4836 drivers/md/md.c sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4846 drivers/md/md.c mddev->parallel_resync = n;
mddev 4848 drivers/md/md.c if (mddev->sync_thread)
mddev 4860 drivers/md/md.c sync_speed_show(struct mddev *mddev, char *page)
mddev 4863 drivers/md/md.c if (mddev->curr_resync == 0)
mddev 4865 drivers/md/md.c resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
mddev 4866 drivers/md/md.c dt = (jiffies - mddev->resync_mark) / HZ;
mddev 4868 drivers/md/md.c db = resync - mddev->resync_mark_cnt;
mddev 4875 drivers/md/md.c sync_completed_show(struct mddev *mddev, char *page)
mddev 4879 drivers/md/md.c if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
mddev 4882 drivers/md/md.c if (mddev->curr_resync == 1 ||
mddev 4883 drivers/md/md.c mddev->curr_resync == 2)
mddev 4886 drivers/md/md.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
mddev 4887 drivers/md/md.c test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
mddev 4888 drivers/md/md.c max_sectors = mddev->resync_max_sectors;
mddev 4890 drivers/md/md.c max_sectors = mddev->dev_sectors;
mddev 4892 drivers/md/md.c resync = mddev->curr_resync_completed;
mddev 4900 drivers/md/md.c min_sync_show(struct mddev *mddev, char *page)
mddev 4903 drivers/md/md.c (unsigned long long)mddev->resync_min);
mddev 4906 drivers/md/md.c min_sync_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4914 drivers/md/md.c spin_lock(&mddev->lock);
mddev 4916 drivers/md/md.c if (min > mddev->resync_max)
mddev 4920 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
mddev 4924 drivers/md/md.c mddev->resync_min = round_down(min, 8);
mddev 4928 drivers/md/md.c spin_unlock(&mddev->lock);
mddev 4936 drivers/md/md.c max_sync_show(struct mddev *mddev, char *page)
mddev 4938 drivers/md/md.c if (mddev->resync_max == MaxSector)
mddev 4942 drivers/md/md.c (unsigned long long)mddev->resync_max);
mddev 4945 drivers/md/md.c max_sync_store(struct mddev *mddev, const char *buf, size_t len)
mddev 4948 drivers/md/md.c spin_lock(&mddev->lock);
mddev 4950 drivers/md/md.c mddev->resync_max = MaxSector;
mddev 4958 drivers/md/md.c if (max < mddev->resync_min)
mddev 4962 drivers/md/md.c if (max < mddev->resync_max &&
mddev 4963 drivers/md/md.c mddev->ro == 0 &&
mddev 4964 drivers/md/md.c test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
mddev 4968 drivers/md/md.c chunk = mddev->chunk_sectors;
mddev 4976 drivers/md/md.c mddev->resync_max = max;
mddev 4978 drivers/md/md.c wake_up(&mddev->recovery_wait);
mddev 4981 drivers/md/md.c spin_unlock(&mddev->lock);
mddev 4989 drivers/md/md.c suspend_lo_show(struct mddev *mddev, char *page)
mddev 4991 drivers/md/md.c return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
mddev 4995 drivers/md/md.c suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
mddev 5006 drivers/md/md.c err = mddev_lock(mddev);
mddev 5010 drivers/md/md.c if (mddev->pers == NULL ||
mddev 5011 drivers/md/md.c mddev->pers->quiesce == NULL)
mddev 5013 drivers/md/md.c mddev_suspend(mddev);
mddev 5014 drivers/md/md.c mddev->suspend_lo = new;
mddev 5015 drivers/md/md.c mddev_resume(mddev);
mddev 5019 drivers/md/md.c mddev_unlock(mddev);
mddev 5026 drivers/md/md.c suspend_hi_show(struct mddev *mddev, char *page)
mddev 5028 drivers/md/md.c return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
mddev 5032 drivers/md/md.c suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
mddev 5043 drivers/md/md.c err = mddev_lock(mddev);
mddev 5047 drivers/md/md.c if (mddev->pers == NULL)
mddev 5050 drivers/md/md.c mddev_suspend(mddev);
mddev 5051 drivers/md/md.c mddev->suspend_hi = new;
mddev 5052 drivers/md/md.c mddev_resume(mddev);
mddev 5056 drivers/md/md.c mddev_unlock(mddev);
mddev 5063 drivers/md/md.c reshape_position_show(struct mddev *mddev, char *page)
mddev 5065 drivers/md/md.c if (mddev->reshape_position != MaxSector)
mddev 5067 drivers/md/md.c (unsigned long long)mddev->reshape_position);
mddev 5073 drivers/md/md.c reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
mddev 5084 drivers/md/md.c err = mddev_lock(mddev);
mddev 5088 drivers/md/md.c if (mddev->pers)
mddev 5090 drivers/md/md.c mddev->reshape_position = new;
mddev 5091 drivers/md/md.c mddev->delta_disks = 0;
mddev 5092 drivers/md/md.c mddev->reshape_backwards = 0;
mddev 5093 drivers/md/md.c mddev->new_level = mddev->level;
mddev 5094 drivers/md/md.c mddev->new_layout = mddev->layout;
mddev 5095 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev 5096 drivers/md/md.c rdev_for_each(rdev, mddev)
mddev 5100 drivers/md/md.c mddev_unlock(mddev);
mddev 5109 drivers/md/md.c reshape_direction_show(struct mddev *mddev, char *page)
mddev 5112 drivers/md/md.c mddev->reshape_backwards ? "backwards" : "forwards");
mddev 5116 drivers/md/md.c reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
mddev 5127 drivers/md/md.c if (mddev->reshape_backwards == backwards)
mddev 5130 drivers/md/md.c err = mddev_lock(mddev);
mddev 5134 drivers/md/md.c if (mddev->delta_disks)
mddev 5136 drivers/md/md.c else if (mddev->persistent &&
mddev 5137 drivers/md/md.c mddev->major_version == 0)
mddev 5140 drivers/md/md.c mddev->reshape_backwards = backwards;
mddev 5141 drivers/md/md.c mddev_unlock(mddev);
mddev 5150 drivers/md/md.c array_size_show(struct mddev *mddev, char *page)
mddev 5152 drivers/md/md.c if (mddev->external_size)
mddev 5154 drivers/md/md.c (unsigned long long)mddev->array_sectors/2);
mddev 5160 drivers/md/md.c array_size_store(struct mddev *mddev, const char *buf, size_t len)
mddev 5165 drivers/md/md.c err = mddev_lock(mddev);
mddev 5170 drivers/md/md.c if (mddev_is_clustered(mddev)) {
mddev 5171 drivers/md/md.c mddev_unlock(mddev);
mddev 5176 drivers/md/md.c if (mddev->pers)
mddev 5177 drivers/md/md.c sectors = mddev->pers->size(mddev, 0, 0);
mddev 5179 drivers/md/md.c sectors = mddev->array_sectors;
mddev 5181 drivers/md/md.c mddev->external_size = 0;
mddev 5185 drivers/md/md.c else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
mddev 5188 drivers/md/md.c mddev->external_size = 1;
mddev 5192 drivers/md/md.c mddev->array_sectors = sectors;
mddev 5193 drivers/md/md.c if (mddev->pers) {
mddev 5194 drivers/md/md.c set_capacity(mddev->gendisk, mddev->array_sectors);
mddev 5195 drivers/md/md.c revalidate_disk(mddev->gendisk);
mddev 5198 drivers/md/md.c mddev_unlock(mddev);
mddev 5207 drivers/md/md.c consistency_policy_show(struct mddev *mddev, char *page)
mddev 5211 drivers/md/md.c if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
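Note: the sync_speed_show() occurrences above (lines 4860-4868) compute the resync rate from two counters and a time delta. A worked userspace model of that arithmetic follows: db is sectors completed since the last mark, dt the elapsed seconds, and the reported figure is KiB/sec (a sector is 512 bytes, so sectors/2 gives KiB). All numeric values below are made up for illustration.

    #include <stdio.h>

    int main(void)
    {
        unsigned long resync_mark_cnt = 1000000; /* sectors done at last mark */
        unsigned long curr_mark_cnt   = 1614000; /* sectors counted now */
        unsigned long recovery_active = 14000;   /* in flight, not yet done */
        unsigned long dt = 3;                    /* seconds since the mark */
        unsigned long resync, db;

        resync = curr_mark_cnt - recovery_active;
        db = resync - resync_mark_cnt;
        if (!dt)
            dt = 1;                              /* md.c guards against dt == 0 too */
        printf("%lu KiB/sec\n", db / dt / 2);    /* prints 100000 KiB/sec */
        return 0;
    }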
mddev 5213 drivers/md/md.c } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
mddev 5215 drivers/md/md.c } else if (mddev->bitmap) {
mddev 5217 drivers/md/md.c } else if (mddev->pers) {
mddev 5218 drivers/md/md.c if (mddev->pers->sync_request)
mddev 5230 drivers/md/md.c consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
mddev 5234 drivers/md/md.c if (mddev->pers) {
mddev 5235 drivers/md/md.c if (mddev->pers->change_consistency_policy)
mddev 5236 drivers/md/md.c err = mddev->pers->change_consistency_policy(mddev, buf);
mddev 5239 drivers/md/md.c } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
mddev 5240 drivers/md/md.c set_bit(MD_HAS_PPL, &mddev->flags);
mddev 5252 drivers/md/md.c static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
mddev 5254 drivers/md/md.c return sprintf(page, "%d\n", mddev->fail_last_dev);
mddev 5262 drivers/md/md.c fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
mddev 5271 drivers/md/md.c if (value != mddev->fail_last_dev)
mddev 5272 drivers/md/md.c mddev->fail_last_dev = value;
mddev 5326 drivers/md/md.c struct mddev *mddev = container_of(kobj, struct mddev, kobj);
mddev 5332 drivers/md/md.c if (list_empty(&mddev->all_mddevs)) {
mddev 5336 drivers/md/md.c mddev_get(mddev);
mddev 5339 drivers/md/md.c rv = entry->show(mddev, page);
mddev 5340 drivers/md/md.c mddev_put(mddev);
mddev 5349 drivers/md/md.c struct mddev *mddev = container_of(kobj, struct mddev, kobj);
mddev 5357 drivers/md/md.c if (list_empty(&mddev->all_mddevs)) {
mddev 5361 drivers/md/md.c mddev_get(mddev);
mddev 5363 drivers/md/md.c rv = entry->store(mddev, page, length);
mddev 5364 drivers/md/md.c mddev_put(mddev);
mddev 5370 drivers/md/md.c struct mddev *mddev = container_of(ko, struct mddev, kobj);
mddev 5372 drivers/md/md.c if (mddev->sysfs_state)
mddev 5373 drivers/md/md.c sysfs_put(mddev->sysfs_state);
mddev 5375 drivers/md/md.c if (mddev->gendisk)
mddev 5376 drivers/md/md.c del_gendisk(mddev->gendisk);
mddev 5377 drivers/md/md.c if (mddev->queue)
mddev 5378 drivers/md/md.c blk_cleanup_queue(mddev->queue);
mddev 5379 drivers/md/md.c if (mddev->gendisk)
mddev 5380 drivers/md/md.c put_disk(mddev->gendisk);
mddev 5381 drivers/md/md.c percpu_ref_exit(&mddev->writes_pending);
mddev 5383 drivers/md/md.c bioset_exit(&mddev->bio_set);
mddev 5384 drivers/md/md.c bioset_exit(&mddev->sync_set);
mddev 5385 drivers/md/md.c kfree(mddev);
mddev 5402 drivers/md/md.c struct mddev *mddev = container_of(ws, struct mddev, del_work);
mddev 5404 drivers/md/md.c sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
mddev 5405 drivers/md/md.c kobject_del(&mddev->kobj);
mddev 5406 drivers/md/md.c kobject_put(&mddev->kobj);
mddev 5411 drivers/md/md.c int mddev_init_writes_pending(struct mddev *mddev)
mddev 5413 drivers/md/md.c if (mddev->writes_pending.percpu_count_ptr)
mddev 5415 drivers/md/md.c if (percpu_ref_init(&mddev->writes_pending, no_op,
mddev 5419 drivers/md/md.c percpu_ref_put(&mddev->writes_pending);
mddev 5436 drivers/md/md.c struct mddev *mddev = mddev_find(dev);
mddev 5443 drivers/md/md.c if (!mddev)
mddev 5446 drivers/md/md.c partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
mddev 5448 drivers/md/md.c unit = MINOR(mddev->unit) >> shift;
mddev 5457 drivers/md/md.c if (mddev->gendisk)
mddev 5463 drivers/md/md.c struct mddev *mddev2;
mddev 5478 drivers/md/md.c mddev->hold_active = UNTIL_STOP;
mddev 5481 drivers/md/md.c mddev->queue = blk_alloc_queue(GFP_KERNEL);
mddev 5482 drivers/md/md.c if (!mddev->queue)
mddev 5484 drivers/md/md.c mddev->queue->queuedata = mddev;
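Note: the md_attr_show()/md_attr_store() occurrences above (lines 5326, 5349, 5370) recover the enclosing mddev from the embedded kobject that sysfs hands back. A self-contained userspace sketch of that container_of() pattern follows; the types are stand-ins, not the kernel's.

    #include <stdio.h>
    #include <stddef.h>

    /* Same pointer arithmetic the kernel macro performs. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobject { int refcount; };       /* stand-in */

    struct fake_mddev {                     /* stand-in for struct mddev */
        int level;
        struct kobject kobj;                /* embedded, as in md.c */
    };

    int main(void)
    {
        struct fake_mddev m = { .level = 5 };
        struct kobject *kobj = &m.kobj;     /* what sysfs sees */
        struct fake_mddev *back = container_of(kobj, struct fake_mddev, kobj);

        printf("recovered level=%d (%s)\n", back->level,
               back == &m ? "same object" : "BUG");
        return 0;
    }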
mddev 5486 drivers/md/md.c blk_queue_make_request(mddev->queue, md_make_request);
mddev 5487 drivers/md/md.c blk_set_stacking_limits(&mddev->queue->limits);
mddev 5491 drivers/md/md.c blk_cleanup_queue(mddev->queue);
mddev 5492 drivers/md/md.c mddev->queue = NULL;
mddev 5495 drivers/md/md.c disk->major = MAJOR(mddev->unit);
mddev 5504 drivers/md/md.c disk->private_data = mddev;
mddev 5505 drivers/md/md.c disk->queue = mddev->queue;
mddev 5506 drivers/md/md.c blk_queue_write_cache(mddev->queue, true, true);
mddev 5512 drivers/md/md.c mddev->gendisk = disk;
mddev 5516 drivers/md/md.c mutex_lock(&mddev->open_mutex);
mddev 5519 drivers/md/md.c error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
mddev 5528 drivers/md/md.c if (mddev->kobj.sd &&
mddev 5529 drivers/md/md.c sysfs_create_group(&mddev->kobj, &md_bitmap_group))
mddev 5531 drivers/md/md.c mutex_unlock(&mddev->open_mutex);
mddev 5534 drivers/md/md.c if (!error && mddev->kobj.sd) {
mddev 5535 drivers/md/md.c kobject_uevent(&mddev->kobj, KOBJ_ADD);
mddev 5536 drivers/md/md.c mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
mddev 5538 drivers/md/md.c mddev_put(mddev);
mddev 5580 drivers/md/md.c struct mddev *mddev = from_timer(mddev, t, safemode_timer);
mddev 5582 drivers/md/md.c mddev->safemode = 1;
mddev 5583 drivers/md/md.c if (mddev->external)
mddev 5584 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state);
mddev 5586 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 5591 drivers/md/md.c int md_run(struct mddev *mddev)
mddev 5597 drivers/md/md.c if (list_empty(&mddev->disks))
mddev 5601 drivers/md/md.c if (mddev->pers)
mddev 5604 drivers/md/md.c if (mddev->sysfs_active)
mddev 5610 drivers/md/md.c if (!mddev->raid_disks) {
mddev 5611 drivers/md/md.c if (!mddev->persistent)
mddev 5613 drivers/md/md.c err = analyze_sbs(mddev);
mddev 5618 drivers/md/md.c if (mddev->level != LEVEL_NONE)
mddev 5619 drivers/md/md.c request_module("md-level-%d", mddev->level);
mddev 5620 drivers/md/md.c else if (mddev->clevel[0])
mddev 5621 drivers/md/md.c request_module("md-%s", mddev->clevel);
mddev 5628 drivers/md/md.c mddev->has_superblocks = false;
mddev 5629 drivers/md/md.c rdev_for_each(rdev, mddev) {
mddev 5634 drivers/md/md.c if (mddev->ro != 1 &&
mddev 5637 drivers/md/md.c mddev->ro = 1;
mddev 5638 drivers/md/md.c if (mddev->gendisk)
mddev 5639 drivers/md/md.c set_disk_ro(mddev->gendisk, 1);
mddev 5643 drivers/md/md.c mddev->has_superblocks = true;
mddev 5652 drivers/md/md.c if (mddev->dev_sectors &&
mddev 5653 drivers/md/md.c rdev->data_offset + mddev->dev_sectors
mddev 5656 drivers/md/md.c mdname(mddev));
mddev 5663 drivers/md/md.c mdname(mddev));
mddev 5670 drivers/md/md.c if (!bioset_initialized(&mddev->bio_set)) {
mddev 5671 drivers/md/md.c err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
mddev 5675 drivers/md/md.c if (!bioset_initialized(&mddev->sync_set)) {
mddev 5676 drivers/md/md.c err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
mddev 5682 drivers/md/md.c pers = find_pers(mddev->level, mddev->clevel);
mddev 5685 drivers/md/md.c if (mddev->level != LEVEL_NONE)
mddev 5687 drivers/md/md.c mddev->level);
mddev 5690 drivers/md/md.c mddev->clevel);
mddev 5695 drivers/md/md.c if (mddev->level != pers->level) {
mddev 5696 drivers/md/md.c mddev->level = pers->level;
mddev 5697 drivers/md/md.c mddev->new_level = pers->level;
mddev 5699 drivers/md/md.c strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev 5701 drivers/md/md.c if (mddev->reshape_position != MaxSector &&
mddev 5717 drivers/md/md.c rdev_for_each(rdev, mddev)
mddev 5718 drivers/md/md.c rdev_for_each(rdev2, mddev) {
mddev 5723 drivers/md/md.c mdname(mddev),
mddev 5734 drivers/md/md.c mddev->recovery = 0;
mddev 5736 drivers/md/md.c mddev->resync_max_sectors = mddev->dev_sectors;
mddev 5738 drivers/md/md.c mddev->ok_start_degraded = start_dirty_degraded;
mddev 5740 drivers/md/md.c if (start_readonly && mddev->ro == 0)
mddev 5741 drivers/md/md.c mddev->ro = 2; /* read-only, but switch on first write */
mddev 5743 drivers/md/md.c err = pers->run(mddev);
mddev 5746 drivers/md/md.c else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
mddev 5747 drivers/md/md.c WARN_ONCE(!mddev->external_size,
mddev 5751 drivers/md/md.c (unsigned long long)mddev->array_sectors / 2,
mddev 5752 drivers/md/md.c (unsigned long long)pers->size(mddev, 0, 0) / 2);
mddev 5756 drivers/md/md.c (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
mddev 5759 drivers/md/md.c bitmap = md_bitmap_create(mddev, -1);
mddev 5763 drivers/md/md.c mdname(mddev), err);
mddev 5765 drivers/md/md.c mddev->bitmap = bitmap;
mddev 5771 drivers/md/md.c if (mddev->bitmap_info.max_write_behind > 0) {
mddev 5774 drivers/md/md.c rdev_for_each(rdev, mddev) {
mddev 5779 drivers/md/md.c if (creat_pool && mddev->wb_info_pool == NULL) {
mddev 5780 drivers/md/md.c mddev->wb_info_pool =
mddev 5783 drivers/md/md.c if (!mddev->wb_info_pool) {
mddev 5790 drivers/md/md.c if (mddev->queue) {
mddev 5793 drivers/md/md.c rdev_for_each(rdev, mddev) {
mddev 5800 drivers/md/md.c if (mddev->degraded)
mddev 5803 drivers/md/md.c blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
mddev 5805 drivers/md/md.c blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
mddev 5806 drivers/md/md.c mddev->queue->backing_dev_info->congested_data = mddev;
mddev 5807 drivers/md/md.c mddev->queue->backing_dev_info->congested_fn = md_congested;
mddev 5810 drivers/md/md.c if (mddev->kobj.sd &&
mddev 5811 drivers/md/md.c sysfs_create_group(&mddev->kobj, &md_redundancy_group))
mddev 5813 drivers/md/md.c mdname(mddev));
mddev 5814 drivers/md/md.c mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
mddev 5815 drivers/md/md.c } else if (mddev->ro == 2) /* auto-readonly not meaningful */
mddev 5816 drivers/md/md.c mddev->ro = 0;
mddev 5818 drivers/md/md.c atomic_set(&mddev->max_corr_read_errors,
mddev 5820 drivers/md/md.c mddev->safemode = 0;
mddev 5821 drivers/md/md.c if (mddev_is_clustered(mddev))
mddev 5822 drivers/md/md.c mddev->safemode_delay = 0;
mddev 5824 drivers/md/md.c mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev 5825 drivers/md/md.c mddev->in_sync = 1;
mddev 5827 drivers/md/md.c spin_lock(&mddev->lock);
mddev 5828 drivers/md/md.c mddev->pers = pers;
mddev 5829 drivers/md/md.c spin_unlock(&mddev->lock);
mddev 5830 drivers/md/md.c rdev_for_each(rdev, mddev)
mddev 5832 drivers/md/md.c sysfs_link_rdev(mddev, rdev); /* failure here is OK */
mddev 5834 drivers/md/md.c if (mddev->degraded && !mddev->ro)
mddev 5838 drivers/md/md.c set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
mddev 5839 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 5841 drivers/md/md.c if (mddev->sb_flags)
mddev 5842 drivers/md/md.c md_update_sb(mddev, 0);
mddev 5844 drivers/md/md.c md_new_event(mddev);
mddev 5848 drivers/md/md.c mddev_detach(mddev);
mddev 5849 drivers/md/md.c if (mddev->private)
mddev 5850 drivers/md/md.c pers->free(mddev, mddev->private);
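Note: md_run() resolves mddev->level / mddev->clevel to a personality via find_pers() (line 5682 above) after request_module() has had a chance to load the matching md-level module. A toy registry lookup in that spirit follows; the table contents and matching rule here are illustrative only, not the kernel's actual registration list.

    #include <stdio.h>
    #include <string.h>

    struct personality { int level; const char *name; };

    /* Hypothetical stand-in for the registered personality list. */
    static const struct personality pers_list[] = {
        { 0, "raid0" }, { 1, "raid1" }, { 5, "raid5" }, { 10, "raid10" },
    };

    static const struct personality *find_pers(int level, const char *clevel)
    {
        size_t i;

        for (i = 0; i < sizeof(pers_list) / sizeof(pers_list[0]); i++)
            if (pers_list[i].level == level ||
                strcmp(pers_list[i].name, clevel) == 0)
                return &pers_list[i];   /* match by number or by name */
        return NULL;
    }

    int main(void)
    {
        const struct personality *p = find_pers(5, "");
        printf("%s\n", p ? p->name : "not found");   /* prints raid5 */
        return 0;
    }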
mddev 5851 drivers/md/md.c mddev->private = NULL;
mddev 5853 drivers/md/md.c md_bitmap_destroy(mddev);
mddev 5855 drivers/md/md.c bioset_exit(&mddev->bio_set);
mddev 5856 drivers/md/md.c bioset_exit(&mddev->sync_set);
mddev 5861 drivers/md/md.c static int do_md_run(struct mddev *mddev)
mddev 5865 drivers/md/md.c set_bit(MD_NOT_READY, &mddev->flags);
mddev 5866 drivers/md/md.c err = md_run(mddev);
mddev 5869 drivers/md/md.c err = md_bitmap_load(mddev);
mddev 5871 drivers/md/md.c md_bitmap_destroy(mddev);
mddev 5875 drivers/md/md.c if (mddev_is_clustered(mddev))
mddev 5876 drivers/md/md.c md_allow_write(mddev);
mddev 5879 drivers/md/md.c md_start(mddev);
mddev 5881 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 5882 drivers/md/md.c md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
mddev 5884 drivers/md/md.c set_capacity(mddev->gendisk, mddev->array_sectors);
mddev 5885 drivers/md/md.c revalidate_disk(mddev->gendisk);
mddev 5886 drivers/md/md.c clear_bit(MD_NOT_READY, &mddev->flags);
mddev 5887 drivers/md/md.c mddev->changed = 1;
mddev 5888 drivers/md/md.c kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
mddev 5889 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state);
mddev 5890 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_action);
mddev 5891 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "degraded");
mddev 5893 drivers/md/md.c clear_bit(MD_NOT_READY, &mddev->flags);
mddev 5897 drivers/md/md.c int md_start(struct mddev *mddev)
mddev 5901 drivers/md/md.c if (mddev->pers->start) {
mddev 5902 drivers/md/md.c set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
mddev 5903 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 5904 drivers/md/md.c ret = mddev->pers->start(mddev);
mddev 5905 drivers/md/md.c clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
mddev 5906 drivers/md/md.c md_wakeup_thread(mddev->sync_thread);
mddev 5912 drivers/md/md.c static int restart_array(struct mddev *mddev)
mddev 5914 drivers/md/md.c struct gendisk *disk = mddev->gendisk;
mddev 5920 drivers/md/md.c if (list_empty(&mddev->disks))
mddev 5922 drivers/md/md.c if (!mddev->pers)
mddev 5924 drivers/md/md.c if (!mddev->ro)
mddev 5928 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) {
mddev 5936 drivers/md/md.c if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
mddev 5942 drivers/md/md.c mddev->safemode = 0;
mddev 5943 drivers/md/md.c mddev->ro = 0;
mddev 5945 drivers/md/md.c pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
mddev 5947 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 5948 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 5949 drivers/md/md.c md_wakeup_thread(mddev->sync_thread);
mddev 5950 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state);
mddev 5954 drivers/md/md.c static void md_clean(struct mddev *mddev)
mddev 5956 drivers/md/md.c mddev->array_sectors = 0;
mddev 5957 drivers/md/md.c mddev->external_size = 0;
mddev 5958 drivers/md/md.c mddev->dev_sectors = 0;
mddev 5959 drivers/md/md.c mddev->raid_disks = 0;
mddev 5960 drivers/md/md.c mddev->recovery_cp = 0;
mddev 5961 drivers/md/md.c mddev->resync_min = 0;
mddev 5962 drivers/md/md.c mddev->resync_max = MaxSector;
mddev 5963 drivers/md/md.c mddev->reshape_position = MaxSector;
mddev 5964 drivers/md/md.c mddev->external = 0;
mddev 5965 drivers/md/md.c mddev->persistent = 0;
mddev 5966 drivers/md/md.c mddev->level = LEVEL_NONE;
mddev 5967 drivers/md/md.c mddev->clevel[0] = 0;
mddev 5968 drivers/md/md.c mddev->flags = 0;
mddev 5969 drivers/md/md.c mddev->sb_flags = 0;
mddev 5970 drivers/md/md.c mddev->ro = 0;
mddev 5971 drivers/md/md.c mddev->metadata_type[0] = 0;
mddev 5972 drivers/md/md.c mddev->chunk_sectors = 0;
mddev 5973 drivers/md/md.c mddev->ctime = mddev->utime = 0;
mddev 5974 drivers/md/md.c mddev->layout = 0;
mddev 5975 drivers/md/md.c mddev->max_disks = 0;
mddev 5976 drivers/md/md.c mddev->events = 0;
mddev 5977 drivers/md/md.c mddev->can_decrease_events = 0;
mddev 5978 drivers/md/md.c mddev->delta_disks = 0;
mddev 5979 drivers/md/md.c mddev->reshape_backwards = 0;
mddev 5980 drivers/md/md.c mddev->new_level = LEVEL_NONE;
mddev 5981 drivers/md/md.c mddev->new_layout = 0;
mddev 5982 drivers/md/md.c mddev->new_chunk_sectors = 0;
mddev 5983 drivers/md/md.c mddev->curr_resync = 0;
mddev 5984 drivers/md/md.c atomic64_set(&mddev->resync_mismatches, 0);
mddev 5985 drivers/md/md.c mddev->suspend_lo = mddev->suspend_hi = 0;
mddev 5986 drivers/md/md.c mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev 5987 drivers/md/md.c mddev->recovery = 0;
mddev 5988 drivers/md/md.c mddev->in_sync = 0;
mddev 5989 drivers/md/md.c mddev->changed = 0;
mddev 5990 drivers/md/md.c mddev->degraded = 0;
mddev 5991 drivers/md/md.c mddev->safemode = 0;
mddev 5992 drivers/md/md.c mddev->private = NULL;
mddev 5993 drivers/md/md.c mddev->cluster_info = NULL;
mddev 5994 drivers/md/md.c mddev->bitmap_info.offset = 0;
mddev 5995 drivers/md/md.c mddev->bitmap_info.default_offset = 0;
mddev 5996 drivers/md/md.c mddev->bitmap_info.default_space = 0;
mddev 5997 drivers/md/md.c mddev->bitmap_info.chunksize = 0;
mddev 5998 drivers/md/md.c mddev->bitmap_info.daemon_sleep = 0;
mddev 5999 drivers/md/md.c mddev->bitmap_info.max_write_behind = 0;
mddev 6000 drivers/md/md.c mddev->bitmap_info.nodes = 0;
mddev 6003 drivers/md/md.c static void __md_stop_writes(struct mddev *mddev)
mddev 6005 drivers/md/md.c set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 6007 drivers/md/md.c if (mddev->sync_thread) {
mddev 6008 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery);
mddev 6009 drivers/md/md.c md_reap_sync_thread(mddev);
mddev 6012 drivers/md/md.c del_timer_sync(&mddev->safemode_timer);
mddev 6014 drivers/md/md.c if (mddev->pers && mddev->pers->quiesce) {
mddev 6015 drivers/md/md.c mddev->pers->quiesce(mddev, 1);
mddev 6016 drivers/md/md.c mddev->pers->quiesce(mddev, 0);
mddev 6018 drivers/md/md.c md_bitmap_flush(mddev);
mddev 6020 drivers/md/md.c if (mddev->ro == 0 &&
mddev 6021 drivers/md/md.c ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
mddev 6022 drivers/md/md.c mddev->sb_flags)) {
mddev 6024 drivers/md/md.c if (!mddev_is_clustered(mddev))
mddev 6025 drivers/md/md.c mddev->in_sync = 1;
mddev 6026 drivers/md/md.c md_update_sb(mddev, 1);
mddev 6028 drivers/md/md.c mempool_destroy(mddev->wb_info_pool);
mddev 6029 drivers/md/md.c mddev->wb_info_pool = NULL;
mddev 6032 drivers/md/md.c void md_stop_writes(struct mddev *mddev)
mddev 6034 drivers/md/md.c mddev_lock_nointr(mddev);
mddev 6035 drivers/md/md.c __md_stop_writes(mddev);
mddev 6036 drivers/md/md.c mddev_unlock(mddev);
mddev 6040 drivers/md/md.c static void mddev_detach(struct mddev *mddev)
mddev 6042 drivers/md/md.c md_bitmap_wait_behind_writes(mddev);
mddev 6043 drivers/md/md.c if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
mddev 6044 drivers/md/md.c mddev->pers->quiesce(mddev, 1);
mddev 6045 drivers/md/md.c mddev->pers->quiesce(mddev, 0);
mddev 6047 drivers/md/md.c md_unregister_thread(&mddev->thread);
mddev 6048 drivers/md/md.c if (mddev->queue)
mddev 6049 drivers/md/md.c blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
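Note: __md_stop_writes() and mddev_detach() above both issue the quiesce(1)/quiesce(0) pair through the personality ops table: raise the barrier so in-flight writes drain, then drop it again. A minimal userspace sketch of that function-pointer bracket follows; the ops struct is a stand-in for struct md_personality.

    #include <stdio.h>

    struct fake_pers {
        void (*quiesce)(void *mddev, int state);
    };

    static void fake_quiesce(void *mddev, int state)
    {
        (void)mddev;
        printf(state ? "barrier up: draining I/O\n"
                     : "barrier down: I/O resumes\n");
    }

    int main(void)
    {
        struct fake_pers pers = { .quiesce = fake_quiesce };
        void *mddev = NULL;             /* placeholder device */

        if (pers.quiesce) {             /* md checks the hook exists first */
            pers.quiesce(mddev, 1);     /* wait for outstanding writes */
            pers.quiesce(mddev, 0);     /* let normal I/O continue */
        }
        return 0;
    }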
mddev 6052 drivers/md/md.c static void __md_stop(struct mddev *mddev)
mddev 6054 drivers/md/md.c struct md_personality *pers = mddev->pers;
mddev 6055 drivers/md/md.c md_bitmap_destroy(mddev);
mddev 6056 drivers/md/md.c mddev_detach(mddev);
mddev 6059 drivers/md/md.c spin_lock(&mddev->lock);
mddev 6060 drivers/md/md.c mddev->pers = NULL;
mddev 6061 drivers/md/md.c spin_unlock(&mddev->lock);
mddev 6062 drivers/md/md.c pers->free(mddev, mddev->private);
mddev 6063 drivers/md/md.c mddev->private = NULL;
mddev 6064 drivers/md/md.c if (pers->sync_request && mddev->to_remove == NULL)
mddev 6065 drivers/md/md.c mddev->to_remove = &md_redundancy_group;
mddev 6067 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 6070 drivers/md/md.c void md_stop(struct mddev *mddev)
mddev 6075 drivers/md/md.c __md_stop(mddev);
mddev 6076 drivers/md/md.c bioset_exit(&mddev->bio_set);
mddev 6077 drivers/md/md.c bioset_exit(&mddev->sync_set);
mddev 6082 drivers/md/md.c static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
mddev 6087 drivers/md/md.c if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
mddev 6089 drivers/md/md.c set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 6090 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 6092 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
mddev 6093 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery);
mddev 6094 drivers/md/md.c if (mddev->sync_thread)
mddev 6097 drivers/md/md.c wake_up_process(mddev->sync_thread->tsk);
mddev 6099 drivers/md/md.c if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
mddev 6101 drivers/md/md.c mddev_unlock(mddev);
mddev 6103 drivers/md/md.c &mddev->recovery));
mddev 6104 drivers/md/md.c wait_event(mddev->sb_wait,
mddev 6105 drivers/md/md.c !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev 6106 drivers/md/md.c mddev_lock_nointr(mddev);
mddev 6108 drivers/md/md.c mutex_lock(&mddev->open_mutex);
mddev 6109 drivers/md/md.c if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev 6110 drivers/md/md.c mddev->sync_thread ||
mddev 6111 drivers/md/md.c test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
mddev 6112 drivers/md/md.c pr_warn("md: %s still in use.\n",mdname(mddev));
mddev 6114 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 6115 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 6116 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 6121 drivers/md/md.c if (mddev->pers) {
mddev 6122 drivers/md/md.c __md_stop_writes(mddev);
mddev 6125 drivers/md/md.c if (mddev->ro==1)
mddev 6127 drivers/md/md.c mddev->ro = 1;
mddev 6128 drivers/md/md.c set_disk_ro(mddev->gendisk, 1);
mddev 6129 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 6130 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 6131 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 6132 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state);
mddev 6136 drivers/md/md.c mutex_unlock(&mddev->open_mutex);
mddev 6144 drivers/md/md.c static int do_md_stop(struct mddev *mddev, int mode,
mddev 6147 drivers/md/md.c struct gendisk *disk = mddev->gendisk;
mddev 6151 drivers/md/md.c if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
mddev 6153 drivers/md/md.c set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 6154 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 6156 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
mddev 6157 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery);
mddev 6158 drivers/md/md.c if (mddev->sync_thread)
mddev 6161 drivers/md/md.c wake_up_process(mddev->sync_thread->tsk);
mddev 6163 drivers/md/md.c mddev_unlock(mddev);
mddev 6164 drivers/md/md.c wait_event(resync_wait, (mddev->sync_thread == NULL &&
mddev 6166 drivers/md/md.c &mddev->recovery)));
mddev 6167 drivers/md/md.c mddev_lock_nointr(mddev);
mddev 6169 drivers/md/md.c mutex_lock(&mddev->open_mutex);
mddev 6170 drivers/md/md.c if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
mddev 6171 drivers/md/md.c mddev->sysfs_active ||
mddev 6172 drivers/md/md.c mddev->sync_thread ||
mddev 6173 drivers/md/md.c test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
mddev 6174 drivers/md/md.c pr_warn("md: %s still in use.\n",mdname(mddev));
mddev 6175 drivers/md/md.c mutex_unlock(&mddev->open_mutex);
mddev 6177 drivers/md/md.c clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev 6178 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 6179 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 6183 drivers/md/md.c if (mddev->pers) {
mddev 6184 drivers/md/md.c if (mddev->ro)
mddev 6187 drivers/md/md.c __md_stop_writes(mddev);
mddev 6188 drivers/md/md.c __md_stop(mddev);
mddev 6189 drivers/md/md.c mddev->queue->backing_dev_info->congested_fn = NULL;
mddev 6192 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state);
mddev 6194 drivers/md/md.c rdev_for_each(rdev, mddev)
mddev 6196 drivers/md/md.c sysfs_unlink_rdev(mddev, rdev);
mddev 6199 drivers/md/md.c mutex_unlock(&mddev->open_mutex);
mddev 6200 drivers/md/md.c mddev->changed = 1;
mddev 6203 drivers/md/md.c if (mddev->ro)
mddev 6204 drivers/md/md.c mddev->ro = 0;
mddev 6206 drivers/md/md.c mutex_unlock(&mddev->open_mutex);
mddev 6211 drivers/md/md.c pr_info("md: %s stopped.\n", mdname(mddev));
mddev 6213 drivers/md/md.c if (mddev->bitmap_info.file) {
mddev 6214 drivers/md/md.c struct file *f = mddev->bitmap_info.file;
mddev 6215 drivers/md/md.c spin_lock(&mddev->lock);
mddev 6216 drivers/md/md.c mddev->bitmap_info.file = NULL;
mddev 6217 drivers/md/md.c spin_unlock(&mddev->lock);
mddev 6220 drivers/md/md.c mddev->bitmap_info.offset = 0;
mddev 6222 drivers/md/md.c export_array(mddev);
mddev 6224 drivers/md/md.c md_clean(mddev);
mddev 6225 drivers/md/md.c if (mddev->hold_active == UNTIL_STOP)
mddev 6226 drivers/md/md.c mddev->hold_active = 0;
mddev 6228 drivers/md/md.c md_new_event(mddev);
mddev 6229 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state);
mddev 6234 drivers/md/md.c static void autorun_array(struct mddev *mddev)
mddev 6239 drivers/md/md.c if (list_empty(&mddev->disks))
mddev 6244 drivers/md/md.c rdev_for_each(rdev, mddev) {
mddev 6250 drivers/md/md.c err = do_md_run(mddev);
mddev 6253 drivers/md/md.c do_md_stop(mddev, 0, NULL);
mddev 6272 drivers/md/md.c struct mddev *mddev;
mddev 6311 drivers/md/md.c mddev = mddev_find(dev);
mddev 6312 drivers/md/md.c if (!mddev || !mddev->gendisk) {
mddev 6313 drivers/md/md.c if (mddev)
mddev 6314 drivers/md/md.c mddev_put(mddev);
mddev 6317 drivers/md/md.c if (mddev_lock(mddev))
mddev 6318 drivers/md/md.c pr_warn("md: %s locked, cannot run\n", mdname(mddev));
mddev 6319 drivers/md/md.c else if (mddev->raid_disks || mddev->major_version
mddev 6320 drivers/md/md.c || !list_empty(&mddev->disks)) {
mddev 6322 drivers/md/md.c mdname(mddev), bdevname(rdev0->bdev,b));
mddev 6323 drivers/md/md.c mddev_unlock(mddev);
mddev 6325 drivers/md/md.c pr_debug("md: created %s\n", mdname(mddev));
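Note: both md_set_readonly() and do_md_stop() above gate on the same busy test, "atomic_read(&mddev->openers) > !!bdev". When the request arrives through an open block device, that opener itself is expected, so !!bdev converts the pointer into an allowance of exactly one. A tiny userspace model:

    #include <stdio.h>

    static int array_busy(int openers, const void *bdev)
    {
        return openers > !!bdev;   /* !!ptr is 0 for NULL, 1 otherwise */
    }

    int main(void)
    {
        int dummy;

        printf("%d\n", array_busy(1, NULL));    /* 1: someone else holds it */
        printf("%d\n", array_busy(1, &dummy));  /* 0: only the caller itself */
        return 0;
    }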
mddev 6326 drivers/md/md.c mddev->persistent = 1;
mddev 6329 drivers/md/md.c if (bind_rdev_to_array(rdev, mddev))
mddev 6332 drivers/md/md.c autorun_array(mddev);
mddev 6333 drivers/md/md.c mddev_unlock(mddev);
mddev 6342 drivers/md/md.c mddev_put(mddev);
mddev 6362 drivers/md/md.c static int get_array_info(struct mddev *mddev, void __user *arg)
mddev 6370 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) {
mddev 6387 drivers/md/md.c info.major_version = mddev->major_version;
mddev 6388 drivers/md/md.c info.minor_version = mddev->minor_version;
mddev 6390 drivers/md/md.c info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
mddev 6391 drivers/md/md.c info.level = mddev->level;
mddev 6392 drivers/md/md.c info.size = mddev->dev_sectors / 2;
mddev 6393 drivers/md/md.c if (info.size != mddev->dev_sectors / 2) /* overflow */
mddev 6396 drivers/md/md.c info.raid_disks = mddev->raid_disks;
mddev 6397 drivers/md/md.c info.md_minor = mddev->md_minor;
mddev 6398 drivers/md/md.c info.not_persistent= !mddev->persistent;
mddev 6400 drivers/md/md.c info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
mddev 6402 drivers/md/md.c if (mddev->in_sync)
mddev 6404 drivers/md/md.c if (mddev->bitmap && mddev->bitmap_info.offset)
mddev 6406 drivers/md/md.c if (mddev_is_clustered(mddev))
mddev 6413 drivers/md/md.c info.layout = mddev->layout;
mddev 6414 drivers/md/md.c info.chunk_size = mddev->chunk_sectors << 9;
mddev 6422 drivers/md/md.c static int get_bitmap_file(struct mddev *mddev, void __user * arg)
mddev 6433 drivers/md/md.c spin_lock(&mddev->lock);
mddev 6435 drivers/md/md.c if (mddev->bitmap_info.file) {
mddev 6436 drivers/md/md.c ptr = file_path(mddev->bitmap_info.file, file->pathname,
mddev 6444 drivers/md/md.c spin_unlock(&mddev->lock);
mddev 6454 drivers/md/md.c static int get_disk_info(struct mddev *mddev, void __user * arg)
mddev 6463 drivers/md/md.c rdev = md_find_rdev_nr_rcu(mddev, info.number);
mddev 6494 drivers/md/md.c static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
mddev 6500 drivers/md/md.c if (mddev_is_clustered(mddev) &&
mddev 6503 drivers/md/md.c mdname(mddev));
mddev 6510 drivers/md/md.c if (!mddev->raid_disks) {
mddev 6513 drivers/md/md.c rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
mddev 6519 drivers/md/md.c if (!list_empty(&mddev->disks)) {
mddev 6521 drivers/md/md.c = list_entry(mddev->disks.next,
mddev 6523 drivers/md/md.c err = super_types[mddev->major_version]
mddev 6524 drivers/md/md.c .load_super(rdev, rdev0, mddev->minor_version);
mddev 6533 drivers/md/md.c err = bind_rdev_to_array(rdev, mddev);
mddev 6544 drivers/md/md.c if (mddev->pers) {
mddev 6546 drivers/md/md.c if (!mddev->pers->hot_add_disk) {
mddev 6548 drivers/md/md.c mdname(mddev));
mddev 6551 drivers/md/md.c if (mddev->persistent)
mddev 6552 drivers/md/md.c rdev = md_import_device(dev, mddev->major_version,
mddev 6553 drivers/md/md.c mddev->minor_version);
mddev 6562 drivers/md/md.c if (!mddev->persistent) {
mddev 6564 drivers/md/md.c info->raid_disk < mddev->raid_disks) {
mddev 6572 drivers/md/md.c super_types[mddev->major_version].
mddev 6573 drivers/md/md.c validate_super(mddev, rdev);
mddev 6598 drivers/md/md.c rdev_for_each(rdev2, mddev) {
mddev 6604 drivers/md/md.c if (has_journal || mddev->bitmap) {
mddev 6613 drivers/md/md.c if (mddev_is_clustered(mddev)) {
mddev 6618 drivers/md/md.c err = md_cluster_ops->add_new_disk(mddev, rdev);
mddev 6627 drivers/md/md.c err = bind_rdev_to_array(rdev, mddev);
mddev 6632 drivers/md/md.c if (mddev_is_clustered(mddev)) {
mddev 6635 drivers/md/md.c err = md_cluster_ops->new_disk_ack(mddev,
mddev 6642 drivers/md/md.c md_cluster_ops->add_new_disk_cancel(mddev);
mddev 6656 drivers/md/md.c if (mddev->major_version != 0) {
mddev 6657 drivers/md/md.c pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
mddev 6670 drivers/md/md.c if (info->raid_disk < mddev->raid_disks)
mddev 6675 drivers/md/md.c if (rdev->raid_disk < mddev->raid_disks)
mddev 6684 drivers/md/md.c if (!mddev->persistent) {
mddev 6691 drivers/md/md.c err = bind_rdev_to_array(rdev, mddev);
mddev 6701 drivers/md/md.c static int hot_remove_disk(struct mddev *mddev, dev_t dev)
mddev 6706 drivers/md/md.c if (!mddev->pers)
mddev 6709 drivers/md/md.c rdev = find_rdev(mddev, dev);
mddev 6717 drivers/md/md.c remove_and_add_spares(mddev, rdev);
mddev 6723 drivers/md/md.c if (mddev_is_clustered(mddev))
mddev 6724 drivers/md/md.c md_cluster_ops->remove_disk(mddev, rdev);
mddev 6727 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 6728 drivers/md/md.c if (mddev->thread)
mddev 6729 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 6731 drivers/md/md.c md_update_sb(mddev, 1);
mddev 6732 drivers/md/md.c md_new_event(mddev);
mddev 6737 drivers/md/md.c bdevname(rdev->bdev,b), mdname(mddev));
mddev 6741 drivers/md/md.c static int hot_add_disk(struct mddev *mddev, dev_t dev)
mddev 6747 drivers/md/md.c if (!mddev->pers)
mddev 6750 drivers/md/md.c if (mddev->major_version != 0) {
mddev 6752 drivers/md/md.c mdname(mddev));
mddev 6755 drivers/md/md.c if (!mddev->pers->hot_add_disk) {
mddev 6757 drivers/md/md.c mdname(mddev));
mddev 6768 drivers/md/md.c if (mddev->persistent)
mddev 6777 drivers/md/md.c bdevname(rdev->bdev,b), mdname(mddev));
mddev 6785 drivers/md/md.c err = bind_rdev_to_array(rdev, mddev);
mddev 6796 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 6797 drivers/md/md.c if (!mddev->thread)
mddev 6798 drivers/md/md.c md_update_sb(mddev, 1);
mddev 6803 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 6804 drivers/md/md.c md_wakeup_thread(mddev->thread);
mddev 6805 drivers/md/md.c md_new_event(mddev);
mddev 6813 drivers/md/md.c static int set_bitmap_file(struct mddev *mddev, int fd)
mddev 6817 drivers/md/md.c if (mddev->pers) {
mddev 6818 drivers/md/md.c if (!mddev->pers->quiesce || !mddev->thread)
mddev 6820 drivers/md/md.c if (mddev->recovery || mddev->sync_thread)
mddev 6829 drivers/md/md.c if (mddev->bitmap || mddev->bitmap_info.file)
mddev 6835 drivers/md/md.c mdname(mddev));
mddev 6842 drivers/md/md.c mdname(mddev));
mddev 6846 drivers/md/md.c mdname(mddev));
mddev 6850 drivers/md/md.c mdname(mddev));
mddev 6857 drivers/md/md.c mddev->bitmap_info.file = f;
mddev 6858 drivers/md/md.c mddev->bitmap_info.offset = 0; /* file overrides offset */
mddev 6859 drivers/md/md.c } else if (mddev->bitmap == NULL)
mddev 6862 drivers/md/md.c if (mddev->pers) {
mddev 6866 drivers/md/md.c bitmap = md_bitmap_create(mddev, -1);
mddev 6867 drivers/md/md.c mddev_suspend(mddev);
mddev 6869 drivers/md/md.c mddev->bitmap = bitmap;
mddev 6870 drivers/md/md.c err = md_bitmap_load(mddev);
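Note: get_array_info() above (lines 6392-6393) converts the 64-bit dev_sectors into a 32-bit KiB field and then re-compares to detect truncation; md.c substitutes a sentinel when the field cannot hold the result. A userspace model of that conversion and overflow check, with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t dev_sectors = (uint64_t)10 << 32;  /* huge component size */
        int size = (int)(dev_sectors / 2);          /* 32-bit ioctl field */

        if ((uint64_t)size != dev_sectors / 2)
            printf("overflow: fall back to a sentinel value\n");
        else
            printf("size = %d KiB\n", size);
        return 0;
    }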
mddev 6874 drivers/md/md.c md_bitmap_destroy(mddev);
mddev 6877 drivers/md/md.c mddev_resume(mddev);
mddev 6879 drivers/md/md.c mddev_suspend(mddev);
mddev 6880 drivers/md/md.c md_bitmap_destroy(mddev);
mddev 6881 drivers/md/md.c mddev_resume(mddev);
mddev 6885 drivers/md/md.c struct file *f = mddev->bitmap_info.file;
mddev 6887 drivers/md/md.c spin_lock(&mddev->lock);
mddev 6888 drivers/md/md.c mddev->bitmap_info.file = NULL;
mddev 6889 drivers/md/md.c spin_unlock(&mddev->lock);
mddev 6910 drivers/md/md.c static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev 6923 drivers/md/md.c mddev->major_version = info->major_version;
mddev 6924 drivers/md/md.c mddev->minor_version = info->minor_version;
mddev 6925 drivers/md/md.c mddev->patch_version = info->patch_version;
mddev 6926 drivers/md/md.c mddev->persistent = !info->not_persistent;
mddev 6930 drivers/md/md.c mddev->ctime = ktime_get_real_seconds();
mddev 6933 drivers/md/md.c mddev->major_version = MD_MAJOR_VERSION;
mddev 6934 drivers/md/md.c mddev->minor_version = MD_MINOR_VERSION;
mddev 6935 drivers/md/md.c mddev->patch_version = MD_PATCHLEVEL_VERSION;
mddev 6936 drivers/md/md.c mddev->ctime = ktime_get_real_seconds();
mddev 6938 drivers/md/md.c mddev->level = info->level;
mddev 6939 drivers/md/md.c mddev->clevel[0] = 0;
mddev 6940 drivers/md/md.c mddev->dev_sectors = 2 * (sector_t)info->size;
mddev 6941 drivers/md/md.c mddev->raid_disks = info->raid_disks;
mddev 6946 drivers/md/md.c mddev->recovery_cp = MaxSector;
mddev 6948 drivers/md/md.c mddev->recovery_cp = 0;
mddev 6949 drivers/md/md.c mddev->persistent = ! info->not_persistent;
mddev 6950 drivers/md/md.c mddev->external = 0;
mddev 6952 drivers/md/md.c mddev->layout = info->layout;
mddev 6953 drivers/md/md.c if (mddev->level == 0)
mddev 6955 drivers/md/md.c mddev->layout = -1;
mddev 6956 drivers/md/md.c mddev->chunk_sectors = info->chunk_size >> 9;
mddev 6958 drivers/md/md.c if (mddev->persistent) {
mddev 6959 drivers/md/md.c mddev->max_disks = MD_SB_DISKS;
mddev 6960 drivers/md/md.c mddev->flags = 0;
mddev 6961 drivers/md/md.c mddev->sb_flags = 0;
mddev 6963 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 6965 drivers/md/md.c mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
mddev 6966 drivers/md/md.c mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
mddev 6967 drivers/md/md.c mddev->bitmap_info.offset = 0;
mddev 6969 drivers/md/md.c mddev->reshape_position = MaxSector;
mddev 6974 drivers/md/md.c get_random_bytes(mddev->uuid, 16);
mddev 6976 drivers/md/md.c mddev->new_level = mddev->level;
mddev 6977 drivers/md/md.c mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev 6978 drivers/md/md.c mddev->new_layout = mddev->layout;
mddev 6979 drivers/md/md.c mddev->delta_disks = 0;
mddev 6980 drivers/md/md.c mddev->reshape_backwards = 0;
mddev 6985 drivers/md/md.c void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
mddev 6987 drivers/md/md.c lockdep_assert_held(&mddev->reconfig_mutex);
mddev 6989 drivers/md/md.c if (mddev->external_size)
mddev 6992 drivers/md/md.c mddev->array_sectors = array_sectors;
mddev 6996 drivers/md/md.c static int update_size(struct mddev *mddev, sector_t num_sectors)
mddev 7001 drivers/md/md.c sector_t old_dev_sectors = mddev->dev_sectors;
mddev 7003 drivers/md/md.c if (mddev->pers->resize == NULL)
mddev 7014 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev 7015 drivers/md/md.c mddev->sync_thread)
mddev 7017 drivers/md/md.c if (mddev->ro)
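Note: set_array_info() above applies two unit conversions when building a fresh array from an mdu_array_info_t: info->size arrives in KiB and becomes dev_sectors (512-byte sectors, line 6940), while info->chunk_size arrives in bytes and becomes chunk_sectors via ">> 9" (line 6956). A worked userspace example with arbitrary values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t info_size = 1048576;       /* component size in KiB (1 GiB) */
        uint32_t info_chunk_size = 65536;   /* chunk size in bytes (64 KiB) */

        uint64_t dev_sectors = 2 * (uint64_t)info_size;  /* KiB -> sectors */
        uint32_t chunk_sectors = info_chunk_size >> 9;   /* bytes -> sectors */

        printf("dev_sectors=%llu chunk_sectors=%u\n",
               (unsigned long long)dev_sectors, chunk_sectors);
        return 0;   /* prints dev_sectors=2097152 chunk_sectors=128 */
    }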
mddev 7020 drivers/md/md.c rdev_for_each(rdev, mddev) {
mddev 7028 drivers/md/md.c rv = mddev->pers->resize(mddev, num_sectors);
mddev 7030 drivers/md/md.c if (mddev_is_clustered(mddev))
mddev 7031 drivers/md/md.c md_cluster_ops->update_size(mddev, old_dev_sectors);
mddev 7032 drivers/md/md.c else if (mddev->queue) {
mddev 7033 drivers/md/md.c set_capacity(mddev->gendisk, mddev->array_sectors);
mddev 7034 drivers/md/md.c revalidate_disk(mddev->gendisk);
mddev 7040 drivers/md/md.c static int update_raid_disks(struct mddev *mddev, int raid_disks)
mddev 7045 drivers/md/md.c if (mddev->pers->check_reshape == NULL)
mddev 7047 drivers/md/md.c if (mddev->ro)
mddev 7050 drivers/md/md.c (mddev->max_disks && raid_disks >= mddev->max_disks))
mddev 7052 drivers/md/md.c if (mddev->sync_thread ||
mddev 7053 drivers/md/md.c test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev 7054 drivers/md/md.c mddev->reshape_position != MaxSector)
mddev 7057 drivers/md/md.c rdev_for_each(rdev, mddev) {
mddev 7058 drivers/md/md.c if (mddev->raid_disks < raid_disks &&
mddev 7061 drivers/md/md.c if (mddev->raid_disks > raid_disks &&
mddev 7066 drivers/md/md.c mddev->delta_disks = raid_disks - mddev->raid_disks;
mddev 7067 drivers/md/md.c if (mddev->delta_disks < 0)
mddev 7068 drivers/md/md.c mddev->reshape_backwards = 1;
mddev 7069 drivers/md/md.c else if (mddev->delta_disks > 0)
mddev 7070 drivers/md/md.c mddev->reshape_backwards = 0;
mddev 7072 drivers/md/md.c rv = mddev->pers->check_reshape(mddev);
mddev 7074 drivers/md/md.c mddev->delta_disks = 0;
mddev 7075 drivers/md/md.c mddev->reshape_backwards = 0;
mddev 7088 drivers/md/md.c static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev 7095 drivers/md/md.c if (mddev->bitmap && mddev->bitmap_info.offset)
mddev 7098 drivers/md/md.c if (mddev->major_version != info->major_version ||
mddev 7099 drivers/md/md.c mddev->minor_version != info->minor_version ||
mddev 7101 drivers/md/md.c mddev->ctime != info->ctime ||
mddev 7102 drivers/md/md.c mddev->level != info->level ||
mddev 7104 drivers/md/md.c mddev->persistent != !info->not_persistent ||
mddev 7105 drivers/md/md.c mddev->chunk_sectors != info->chunk_size >> 9 ||
mddev 7111 drivers/md/md.c if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
mddev 7113 drivers/md/md.c if (mddev->raid_disks != info->raid_disks)
mddev 7115 drivers/md/md.c if (mddev->layout != info->layout)
mddev 7124 drivers/md/md.c if (mddev->layout != info->layout) {
mddev 7129 drivers/md/md.c if (mddev->pers->check_reshape == NULL)
mddev 7132 drivers/md/md.c mddev->new_layout = info->layout;
mddev 7133 drivers/md/md.c rv = mddev->pers->check_reshape(mddev);
mddev 7135 drivers/md/md.c mddev->new_layout = mddev->layout;
mddev 7139 drivers/md/md.c if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
mddev 7140 drivers/md/md.c rv = update_size(mddev, (sector_t)info->size * 2);
mddev 7142 drivers/md/md.c if (mddev->raid_disks != info->raid_disks)
mddev 7143 drivers/md/md.c rv = update_raid_disks(mddev, info->raid_disks);
mddev 7146 drivers/md/md.c if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
mddev 7150 drivers/md/md.c if (mddev->recovery || mddev->sync_thread) {
mddev 7157 drivers/md/md.c if (mddev->bitmap) {
mddev 7161 drivers/md/md.c if (mddev->bitmap_info.default_offset == 0) {
mddev 7165 drivers/md/md.c mddev->bitmap_info.offset =
mddev 7166 drivers/md/md.c mddev->bitmap_info.default_offset;
mddev 7167 drivers/md/md.c mddev->bitmap_info.space =
mddev 7168 drivers/md/md.c mddev->bitmap_info.default_space;
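Note: update_raid_disks() above (lines 7066-7070) derives delta_disks and the reshape direction before handing validation to the personality's check_reshape(). Shrinking an array reshapes backwards (data is relocated starting from the end); growing reshapes forward. A plain C model of that decision:

    #include <stdio.h>

    int main(void)
    {
        int raid_disks = 4;          /* current member count */
        int requested = 3;           /* user asks to shrink by one */

        int delta_disks = requested - raid_disks;
        int reshape_backwards = 0;
        if (delta_disks < 0)
            reshape_backwards = 1;   /* shrink: walk the array from the end */
        else if (delta_disks > 0)
            reshape_backwards = 0;   /* grow: reshape forward */

        printf("delta_disks=%d backwards=%d\n", delta_disks, reshape_backwards);
        return 0;                    /* prints delta_disks=-1 backwards=1 */
    }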
mddev->bitmap_info.default_space; mddev 7169 drivers/md/md.c bitmap = md_bitmap_create(mddev, -1); mddev 7170 drivers/md/md.c mddev_suspend(mddev); mddev 7172 drivers/md/md.c mddev->bitmap = bitmap; mddev 7173 drivers/md/md.c rv = md_bitmap_load(mddev); mddev 7177 drivers/md/md.c md_bitmap_destroy(mddev); mddev 7178 drivers/md/md.c mddev_resume(mddev); mddev 7181 drivers/md/md.c if (!mddev->bitmap) { mddev 7185 drivers/md/md.c if (mddev->bitmap->storage.file) { mddev 7189 drivers/md/md.c if (mddev->bitmap_info.nodes) { mddev 7191 drivers/md/md.c if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { mddev 7194 drivers/md/md.c md_cluster_ops->unlock_all_bitmaps(mddev); mddev 7198 drivers/md/md.c mddev->bitmap_info.nodes = 0; mddev 7199 drivers/md/md.c md_cluster_ops->leave(mddev); mddev 7201 drivers/md/md.c mddev_suspend(mddev); mddev 7202 drivers/md/md.c md_bitmap_destroy(mddev); mddev 7203 drivers/md/md.c mddev_resume(mddev); mddev 7204 drivers/md/md.c mddev->bitmap_info.offset = 0; mddev 7207 drivers/md/md.c md_update_sb(mddev, 1); mddev 7213 drivers/md/md.c static int set_disk_faulty(struct mddev *mddev, dev_t dev) mddev 7218 drivers/md/md.c if (mddev->pers == NULL) mddev 7222 drivers/md/md.c rdev = md_find_rdev_rcu(mddev, dev); mddev 7226 drivers/md/md.c md_error(mddev, rdev); mddev 7242 drivers/md/md.c struct mddev *mddev = bdev->bd_disk->private_data; mddev 7246 drivers/md/md.c geo->cylinders = mddev->array_sectors / 8; mddev 7281 drivers/md/md.c struct mddev *mddev = NULL; mddev 7320 drivers/md/md.c mddev = bdev->bd_disk->private_data; mddev 7322 drivers/md/md.c if (!mddev) { mddev 7330 drivers/md/md.c if (!mddev->raid_disks && !mddev->external) mddev 7333 drivers/md/md.c err = get_array_info(mddev, argp); mddev 7337 drivers/md/md.c if (!mddev->raid_disks && !mddev->external) mddev 7340 drivers/md/md.c err = get_disk_info(mddev, argp); mddev 7344 drivers/md/md.c err = set_disk_faulty(mddev, new_decode_dev(arg)); mddev 7348 drivers/md/md.c err = get_bitmap_file(mddev, argp); mddev 7359 drivers/md/md.c wait_event_interruptible_timeout(mddev->sb_wait, mddev 7361 drivers/md/md.c &mddev->recovery), mddev 7367 drivers/md/md.c mutex_lock(&mddev->open_mutex); mddev 7368 drivers/md/md.c if (mddev->pers && atomic_read(&mddev->openers) > 1) { mddev 7369 drivers/md/md.c mutex_unlock(&mddev->open_mutex); mddev 7373 drivers/md/md.c WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); mddev 7374 drivers/md/md.c set_bit(MD_CLOSING, &mddev->flags); mddev 7376 drivers/md/md.c mutex_unlock(&mddev->open_mutex); mddev 7379 drivers/md/md.c err = mddev_lock(mddev); mddev 7394 drivers/md/md.c if (mddev->pers) { mddev 7395 drivers/md/md.c err = update_array_info(mddev, &info); mddev 7402 drivers/md/md.c if (!list_empty(&mddev->disks)) { mddev 7403 drivers/md/md.c pr_warn("md: array %s already has disks!\n", mdname(mddev)); mddev 7407 drivers/md/md.c if (mddev->raid_disks) { mddev 7408 drivers/md/md.c pr_warn("md: array %s already initialised!\n", mdname(mddev)); mddev 7412 drivers/md/md.c err = set_array_info(mddev, &info); mddev 7425 drivers/md/md.c if ((!mddev->raid_disks && !mddev->external) mddev 7438 drivers/md/md.c err = restart_array(mddev); mddev 7442 drivers/md/md.c err = do_md_stop(mddev, 0, bdev); mddev 7446 drivers/md/md.c err = md_set_readonly(mddev, bdev); mddev 7450 drivers/md/md.c err = hot_remove_disk(mddev, new_decode_dev(arg)); mddev 7458 drivers/md/md.c if (mddev->pers) { mddev 7466 drivers/md/md.c err = add_new_disk(mddev, &info); mddev 7485 drivers/md/md.c if (mddev->ro != 1) mddev 7491 
drivers/md/md.c if (mddev->pers) { mddev 7492 drivers/md/md.c err = restart_array(mddev); mddev 7494 drivers/md/md.c mddev->ro = 2; mddev 7495 drivers/md/md.c set_disk_ro(mddev->gendisk, 0); mddev 7505 drivers/md/md.c if (mddev->ro && mddev->pers) { mddev 7506 drivers/md/md.c if (mddev->ro == 2) { mddev 7507 drivers/md/md.c mddev->ro = 0; mddev 7508 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state); mddev 7509 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 7514 drivers/md/md.c if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { mddev 7515 drivers/md/md.c mddev_unlock(mddev); mddev 7516 drivers/md/md.c wait_event(mddev->sb_wait, mddev 7517 drivers/md/md.c !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && mddev 7518 drivers/md/md.c !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev 7519 drivers/md/md.c mddev_lock_nointr(mddev); mddev 7534 drivers/md/md.c err = add_new_disk(mddev, &info); mddev 7539 drivers/md/md.c if (mddev_is_clustered(mddev)) mddev 7540 drivers/md/md.c md_cluster_ops->new_disk_ack(mddev, false); mddev 7546 drivers/md/md.c err = hot_add_disk(mddev, new_decode_dev(arg)); mddev 7550 drivers/md/md.c err = do_md_run(mddev); mddev 7554 drivers/md/md.c err = set_bitmap_file(mddev, (int)arg); mddev 7563 drivers/md/md.c if (mddev->hold_active == UNTIL_IOCTL && mddev 7565 drivers/md/md.c mddev->hold_active = 0; mddev 7566 drivers/md/md.c mddev_unlock(mddev); mddev 7569 drivers/md/md.c clear_bit(MD_CLOSING, &mddev->flags); mddev 7598 drivers/md/md.c struct mddev *mddev = mddev_find(bdev->bd_dev); mddev 7601 drivers/md/md.c if (!mddev) mddev 7604 drivers/md/md.c if (mddev->gendisk != bdev->bd_disk) { mddev 7608 drivers/md/md.c mddev_put(mddev); mddev 7614 drivers/md/md.c BUG_ON(mddev != bdev->bd_disk->private_data); mddev 7616 drivers/md/md.c if ((err = mutex_lock_interruptible(&mddev->open_mutex))) mddev 7619 drivers/md/md.c if (test_bit(MD_CLOSING, &mddev->flags)) { mddev 7620 drivers/md/md.c mutex_unlock(&mddev->open_mutex); mddev 7626 drivers/md/md.c atomic_inc(&mddev->openers); mddev 7627 drivers/md/md.c mutex_unlock(&mddev->open_mutex); mddev 7632 drivers/md/md.c mddev_put(mddev); mddev 7638 drivers/md/md.c struct mddev *mddev = disk->private_data; mddev 7640 drivers/md/md.c BUG_ON(!mddev); mddev 7641 drivers/md/md.c atomic_dec(&mddev->openers); mddev 7642 drivers/md/md.c mddev_put(mddev); mddev 7647 drivers/md/md.c struct mddev *mddev = disk->private_data; mddev 7649 drivers/md/md.c return mddev->changed; mddev 7654 drivers/md/md.c struct mddev *mddev = disk->private_data; mddev 7656 drivers/md/md.c mddev->changed = 0; mddev 7727 drivers/md/md.c struct mddev *mddev, const char *name) mddev 7738 drivers/md/md.c thread->mddev = mddev; mddev 7742 drivers/md/md.c mdname(thread->mddev), mddev 7770 drivers/md/md.c void md_error(struct mddev *mddev, struct md_rdev *rdev) mddev 7775 drivers/md/md.c if (!mddev->pers || !mddev->pers->error_handler) mddev 7777 drivers/md/md.c mddev->pers->error_handler(mddev,rdev); mddev 7778 drivers/md/md.c if (mddev->degraded) mddev 7779 drivers/md/md.c set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); mddev 7781 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 7782 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 7783 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 7784 drivers/md/md.c if (mddev->event_work.func) mddev 7785 drivers/md/md.c queue_work(md_misc_wq, &mddev->event_work); mddev 7786 drivers/md/md.c md_new_event(mddev); mddev 7811 drivers/md/md.c static int 
status_resync(struct seq_file *seq, struct mddev *mddev) mddev 7819 drivers/md/md.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || mddev 7820 drivers/md/md.c test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) mddev 7821 drivers/md/md.c max_sectors = mddev->resync_max_sectors; mddev 7823 drivers/md/md.c max_sectors = mddev->dev_sectors; mddev 7825 drivers/md/md.c resync = mddev->curr_resync; mddev 7827 drivers/md/md.c if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) mddev 7833 drivers/md/md.c resync -= atomic_read(&mddev->recovery_active); mddev 7836 drivers/md/md.c if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { mddev 7839 drivers/md/md.c rdev_for_each(rdev, mddev) mddev 7847 drivers/md/md.c if (mddev->reshape_position != MaxSector) mddev 7853 drivers/md/md.c if (mddev->recovery_cp < MaxSector) { mddev 7890 drivers/md/md.c (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? mddev 7892 drivers/md/md.c (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? mddev 7894 drivers/md/md.c (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? mddev 7917 drivers/md/md.c dt = ((jiffies - mddev->resync_mark) / HZ); mddev 7920 drivers/md/md.c curr_mark_cnt = mddev->curr_mark_cnt; mddev 7921 drivers/md/md.c recovery_active = atomic_read(&mddev->recovery_active); mddev 7922 drivers/md/md.c resync_mark_cnt = mddev->resync_mark_cnt; mddev 7943 drivers/md/md.c struct mddev *mddev; mddev 7954 drivers/md/md.c mddev = list_entry(tmp, struct mddev, all_mddevs); mddev 7955 drivers/md/md.c mddev_get(mddev); mddev 7957 drivers/md/md.c return mddev; mddev 7968 drivers/md/md.c struct mddev *next_mddev, *mddev = v; mddev 7978 drivers/md/md.c tmp = mddev->all_mddevs.next; mddev 7980 drivers/md/md.c next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); mddev 7988 drivers/md/md.c mddev_put(mddev); mddev 7995 drivers/md/md.c struct mddev *mddev = v; mddev 7997 drivers/md/md.c if (mddev && v != (void*)1 && v != (void*)2) mddev 7998 drivers/md/md.c mddev_put(mddev); mddev 8003 drivers/md/md.c struct mddev *mddev = v; mddev 8024 drivers/md/md.c spin_lock(&mddev->lock); mddev 8025 drivers/md/md.c if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { mddev 8026 drivers/md/md.c seq_printf(seq, "%s : %sactive", mdname(mddev), mddev 8027 drivers/md/md.c mddev->pers ? 
"" : "in"); mddev 8028 drivers/md/md.c if (mddev->pers) { mddev 8029 drivers/md/md.c if (mddev->ro==1) mddev 8031 drivers/md/md.c if (mddev->ro==2) mddev 8033 drivers/md/md.c seq_printf(seq, " %s", mddev->pers->name); mddev 8038 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) { mddev 8058 drivers/md/md.c if (!list_empty(&mddev->disks)) { mddev 8059 drivers/md/md.c if (mddev->pers) mddev 8062 drivers/md/md.c mddev->array_sectors / 2); mddev 8067 drivers/md/md.c if (mddev->persistent) { mddev 8068 drivers/md/md.c if (mddev->major_version != 0 || mddev 8069 drivers/md/md.c mddev->minor_version != 90) { mddev 8071 drivers/md/md.c mddev->major_version, mddev 8072 drivers/md/md.c mddev->minor_version); mddev 8074 drivers/md/md.c } else if (mddev->external) mddev 8076 drivers/md/md.c mddev->metadata_type); mddev 8080 drivers/md/md.c if (mddev->pers) { mddev 8081 drivers/md/md.c mddev->pers->status(seq, mddev); mddev 8083 drivers/md/md.c if (mddev->pers->sync_request) { mddev 8084 drivers/md/md.c if (status_resync(seq, mddev)) mddev 8090 drivers/md/md.c md_bitmap_status(seq, mddev->bitmap); mddev 8094 drivers/md/md.c spin_unlock(&mddev->lock); mddev 8193 drivers/md/md.c int md_setup_cluster(struct mddev *mddev, int nodes) mddev 8206 drivers/md/md.c return md_cluster_ops->join(mddev, nodes); mddev 8209 drivers/md/md.c void md_cluster_stop(struct mddev *mddev) mddev 8213 drivers/md/md.c md_cluster_ops->leave(mddev); mddev 8217 drivers/md/md.c static int is_mddev_idle(struct mddev *mddev, int init) mddev 8225 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) { mddev 8260 drivers/md/md.c void md_done_sync(struct mddev *mddev, int blocks, int ok) mddev 8263 drivers/md/md.c atomic_sub(blocks, &mddev->recovery_active); mddev 8264 drivers/md/md.c wake_up(&mddev->recovery_wait); mddev 8266 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 8267 drivers/md/md.c set_bit(MD_RECOVERY_ERROR, &mddev->recovery); mddev 8268 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 8281 drivers/md/md.c bool md_write_start(struct mddev *mddev, struct bio *bi) mddev 8288 drivers/md/md.c BUG_ON(mddev->ro == 1); mddev 8289 drivers/md/md.c if (mddev->ro == 2) { mddev 8291 drivers/md/md.c mddev->ro = 0; mddev 8292 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 8293 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 8294 drivers/md/md.c md_wakeup_thread(mddev->sync_thread); mddev 8298 drivers/md/md.c percpu_ref_get(&mddev->writes_pending); mddev 8300 drivers/md/md.c if (mddev->safemode == 1) mddev 8301 drivers/md/md.c mddev->safemode = 0; mddev 8303 drivers/md/md.c if (mddev->in_sync || mddev->sync_checkers) { mddev 8304 drivers/md/md.c spin_lock(&mddev->lock); mddev 8305 drivers/md/md.c if (mddev->in_sync) { mddev 8306 drivers/md/md.c mddev->in_sync = 0; mddev 8307 drivers/md/md.c set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); mddev 8308 drivers/md/md.c set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); mddev 8309 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 8312 drivers/md/md.c spin_unlock(&mddev->lock); mddev 8316 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state); mddev 8317 drivers/md/md.c if (!mddev->has_superblocks) mddev 8319 drivers/md/md.c wait_event(mddev->sb_wait, mddev 8320 drivers/md/md.c !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || mddev 8321 drivers/md/md.c mddev->suspended); mddev 8322 drivers/md/md.c if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { mddev 8323 drivers/md/md.c percpu_ref_put(&mddev->writes_pending); mddev 8338 
drivers/md/md.c void md_write_inc(struct mddev *mddev, struct bio *bi) mddev 8342 drivers/md/md.c WARN_ON_ONCE(mddev->in_sync || mddev->ro); mddev 8343 drivers/md/md.c percpu_ref_get(&mddev->writes_pending); mddev 8347 drivers/md/md.c void md_write_end(struct mddev *mddev) mddev 8349 drivers/md/md.c percpu_ref_put(&mddev->writes_pending); mddev 8351 drivers/md/md.c if (mddev->safemode == 2) mddev 8352 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 8353 drivers/md/md.c else if (mddev->safemode_delay) mddev 8357 drivers/md/md.c mod_timer(&mddev->safemode_timer, mddev 8358 drivers/md/md.c roundup(jiffies, mddev->safemode_delay) + mddev 8359 drivers/md/md.c mddev->safemode_delay); mddev 8370 drivers/md/md.c void md_allow_write(struct mddev *mddev) mddev 8372 drivers/md/md.c if (!mddev->pers) mddev 8374 drivers/md/md.c if (mddev->ro) mddev 8376 drivers/md/md.c if (!mddev->pers->sync_request) mddev 8379 drivers/md/md.c spin_lock(&mddev->lock); mddev 8380 drivers/md/md.c if (mddev->in_sync) { mddev 8381 drivers/md/md.c mddev->in_sync = 0; mddev 8382 drivers/md/md.c set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); mddev 8383 drivers/md/md.c set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); mddev 8384 drivers/md/md.c if (mddev->safemode_delay && mddev 8385 drivers/md/md.c mddev->safemode == 0) mddev 8386 drivers/md/md.c mddev->safemode = 1; mddev 8387 drivers/md/md.c spin_unlock(&mddev->lock); mddev 8388 drivers/md/md.c md_update_sb(mddev, 0); mddev 8389 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_state); mddev 8391 drivers/md/md.c wait_event(mddev->sb_wait, mddev 8392 drivers/md/md.c !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev 8394 drivers/md/md.c spin_unlock(&mddev->lock); mddev 8403 drivers/md/md.c struct mddev *mddev = thread->mddev; mddev 8404 drivers/md/md.c struct mddev *mddev2; mddev 8420 drivers/md/md.c if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) || mddev 8421 drivers/md/md.c test_bit(MD_RECOVERY_WAIT, &mddev->recovery)) mddev 8423 drivers/md/md.c if (mddev->ro) {/* never try to sync a read-only array */ mddev 8424 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 8428 drivers/md/md.c if (mddev_is_clustered(mddev)) { mddev 8429 drivers/md/md.c ret = md_cluster_ops->resync_start(mddev); mddev 8433 drivers/md/md.c set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); mddev 8434 drivers/md/md.c if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || mddev 8435 drivers/md/md.c test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || mddev 8436 drivers/md/md.c test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) mddev 8437 drivers/md/md.c && ((unsigned long long)mddev->curr_resync_completed mddev 8438 drivers/md/md.c < (unsigned long long)mddev->resync_max_sectors)) mddev 8442 drivers/md/md.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { mddev 8443 drivers/md/md.c if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { mddev 8446 drivers/md/md.c } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { mddev 8451 drivers/md/md.c } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) mddev 8456 drivers/md/md.c mddev->last_sync_action = action ?: desc; mddev 8476 drivers/md/md.c mddev->curr_resync = 2; mddev 8479 drivers/md/md.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev 8482 drivers/md/md.c if (mddev2 == mddev) mddev 8484 drivers/md/md.c if (!mddev->parallel_resync mddev 8486 drivers/md/md.c && match_mddev_units(mddev, mddev2)) { mddev 8488 drivers/md/md.c if (mddev < mddev2 && mddev->curr_resync == 2) { mddev 8490 drivers/md/md.c 
mddev->curr_resync = 1; mddev 8493 drivers/md/md.c if (mddev > mddev2 && mddev->curr_resync == 1) mddev 8503 drivers/md/md.c if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && mddev 8504 drivers/md/md.c mddev2->curr_resync >= mddev->curr_resync) { mddev 8508 drivers/md/md.c desc, mdname(mddev), mddev 8521 drivers/md/md.c } while (mddev->curr_resync < 2); mddev 8524 drivers/md/md.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { mddev 8528 drivers/md/md.c max_sectors = mddev->resync_max_sectors; mddev 8529 drivers/md/md.c atomic64_set(&mddev->resync_mismatches, 0); mddev 8531 drivers/md/md.c if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) mddev 8532 drivers/md/md.c j = mddev->resync_min; mddev 8533 drivers/md/md.c else if (!mddev->bitmap) mddev 8534 drivers/md/md.c j = mddev->recovery_cp; mddev 8536 drivers/md/md.c } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { mddev 8537 drivers/md/md.c max_sectors = mddev->resync_max_sectors; mddev 8543 drivers/md/md.c if (mddev_is_clustered(mddev) && mddev 8544 drivers/md/md.c mddev->reshape_position != MaxSector) mddev 8545 drivers/md/md.c j = mddev->reshape_position; mddev 8548 drivers/md/md.c max_sectors = mddev->dev_sectors; mddev 8551 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) mddev 8568 drivers/md/md.c if (mddev->bitmap) { mddev 8569 drivers/md/md.c mddev->pers->quiesce(mddev, 1); mddev 8570 drivers/md/md.c mddev->pers->quiesce(mddev, 0); mddev 8574 drivers/md/md.c pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); mddev 8575 drivers/md/md.c pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); mddev 8577 drivers/md/md.c speed_max(mddev), desc); mddev 8579 drivers/md/md.c is_mddev_idle(mddev, 1); /* this initializes IO event counters */ mddev 8587 drivers/md/md.c mddev->resync_mark = mark[last_mark]; mddev 8588 drivers/md/md.c mddev->resync_mark_cnt = mark_cnt[last_mark]; mddev 8597 drivers/md/md.c atomic_set(&mddev->recovery_active, 0); mddev 8602 drivers/md/md.c desc, mdname(mddev)); mddev 8603 drivers/md/md.c mddev->curr_resync = j; mddev 8605 drivers/md/md.c mddev->curr_resync = 3; /* no longer delayed */ mddev 8606 drivers/md/md.c mddev->curr_resync_completed = j; mddev 8607 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "sync_completed"); mddev 8608 drivers/md/md.c md_new_event(mddev); mddev 8617 drivers/md/md.c if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 8618 drivers/md/md.c ((mddev->curr_resync > mddev->curr_resync_completed && mddev 8619 drivers/md/md.c (mddev->curr_resync - mddev->curr_resync_completed) mddev 8622 drivers/md/md.c (j - mddev->curr_resync_completed)*2 mddev 8623 drivers/md/md.c >= mddev->resync_max - mddev->curr_resync_completed || mddev 8624 drivers/md/md.c mddev->curr_resync_completed > mddev->resync_max mddev 8627 drivers/md/md.c wait_event(mddev->recovery_wait, mddev 8628 drivers/md/md.c atomic_read(&mddev->recovery_active) == 0); mddev 8629 drivers/md/md.c mddev->curr_resync_completed = j; mddev 8630 drivers/md/md.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && mddev 8631 drivers/md/md.c j > mddev->recovery_cp) mddev 8632 drivers/md/md.c mddev->recovery_cp = j; mddev 8634 drivers/md/md.c set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); mddev 8635 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "sync_completed"); mddev 8638 drivers/md/md.c while (j >= mddev->resync_max && mddev 8639 drivers/md/md.c !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { mddev 8645 drivers/md/md.c wait_event_interruptible(mddev->recovery_wait, mddev 8646 
drivers/md/md.c mddev->resync_max > j mddev 8648 drivers/md/md.c &mddev->recovery)); mddev 8651 drivers/md/md.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev 8654 drivers/md/md.c sectors = mddev->pers->sync_request(mddev, j, &skipped); mddev 8656 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 8662 drivers/md/md.c atomic_add(sectors, &mddev->recovery_active); mddev 8665 drivers/md/md.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev 8673 drivers/md/md.c mddev->curr_resync = j; mddev 8674 drivers/md/md.c mddev->curr_mark_cnt = io_sectors; mddev 8679 drivers/md/md.c md_new_event(mddev); mddev 8690 drivers/md/md.c mddev->resync_mark = mark[next]; mddev 8691 drivers/md/md.c mddev->resync_mark_cnt = mark_cnt[next]; mddev 8693 drivers/md/md.c mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); mddev 8697 drivers/md/md.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev 8710 drivers/md/md.c recovery_done = io_sectors - atomic_read(&mddev->recovery_active); mddev 8711 drivers/md/md.c currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 mddev 8712 drivers/md/md.c /((jiffies-mddev->resync_mark)/HZ +1) +1; mddev 8714 drivers/md/md.c if (currspeed > speed_min(mddev)) { mddev 8715 drivers/md/md.c if (currspeed > speed_max(mddev)) { mddev 8719 drivers/md/md.c if (!is_mddev_idle(mddev, 0)) { mddev 8724 drivers/md/md.c wait_event(mddev->recovery_wait, mddev 8725 drivers/md/md.c !atomic_read(&mddev->recovery_active)); mddev 8729 drivers/md/md.c pr_info("md: %s: %s %s.\n",mdname(mddev), desc, mddev 8730 drivers/md/md.c test_bit(MD_RECOVERY_INTR, &mddev->recovery) mddev 8736 drivers/md/md.c wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); mddev 8738 drivers/md/md.c if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 8739 drivers/md/md.c !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && mddev 8740 drivers/md/md.c mddev->curr_resync > 3) { mddev 8741 drivers/md/md.c mddev->curr_resync_completed = mddev->curr_resync; mddev 8742 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "sync_completed"); mddev 8744 drivers/md/md.c mddev->pers->sync_request(mddev, max_sectors, &skipped); mddev 8746 drivers/md/md.c if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && mddev 8747 drivers/md/md.c mddev->curr_resync > 3) { mddev 8748 drivers/md/md.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { mddev 8749 drivers/md/md.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { mddev 8750 drivers/md/md.c if (mddev->curr_resync >= mddev->recovery_cp) { mddev 8752 drivers/md/md.c desc, mdname(mddev)); mddev 8754 drivers/md/md.c &mddev->recovery)) mddev 8755 drivers/md/md.c mddev->recovery_cp = mddev 8756 drivers/md/md.c mddev->curr_resync_completed; mddev 8758 drivers/md/md.c mddev->recovery_cp = mddev 8759 drivers/md/md.c mddev->curr_resync; mddev 8762 drivers/md/md.c mddev->recovery_cp = MaxSector; mddev 8764 drivers/md/md.c if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev 8765 drivers/md/md.c mddev->curr_resync = MaxSector; mddev 8766 drivers/md/md.c if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 8767 drivers/md/md.c test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { mddev 8769 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) mddev 8771 drivers/md/md.c mddev->delta_disks >= 0 && mddev 8775 drivers/md/md.c rdev->recovery_offset < mddev->curr_resync) mddev 8776 drivers/md/md.c rdev->recovery_offset = mddev->curr_resync; mddev 8785 drivers/md/md.c set_mask_bits(&mddev->sb_flags, 0, mddev 8788 
drivers/md/md.c if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 8789 drivers/md/md.c !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && mddev 8790 drivers/md/md.c mddev->delta_disks > 0 && mddev 8791 drivers/md/md.c mddev->pers->finish_reshape && mddev 8792 drivers/md/md.c mddev->pers->size && mddev 8793 drivers/md/md.c mddev->queue) { mddev 8794 drivers/md/md.c mddev_lock_nointr(mddev); mddev 8795 drivers/md/md.c md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); mddev 8796 drivers/md/md.c mddev_unlock(mddev); mddev 8797 drivers/md/md.c if (!mddev_is_clustered(mddev)) { mddev 8798 drivers/md/md.c set_capacity(mddev->gendisk, mddev->array_sectors); mddev 8799 drivers/md/md.c revalidate_disk(mddev->gendisk); mddev 8803 drivers/md/md.c spin_lock(&mddev->lock); mddev 8804 drivers/md/md.c if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { mddev 8806 drivers/md/md.c if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) mddev 8807 drivers/md/md.c mddev->resync_min = 0; mddev 8808 drivers/md/md.c mddev->resync_max = MaxSector; mddev 8809 drivers/md/md.c } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) mddev 8810 drivers/md/md.c mddev->resync_min = mddev->curr_resync_completed; mddev 8811 drivers/md/md.c set_bit(MD_RECOVERY_DONE, &mddev->recovery); mddev 8812 drivers/md/md.c mddev->curr_resync = 0; mddev 8813 drivers/md/md.c spin_unlock(&mddev->lock); mddev 8816 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 8821 drivers/md/md.c static int remove_and_add_spares(struct mddev *mddev, mddev 8829 drivers/md/md.c if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) mddev 8833 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 8851 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 8859 drivers/md/md.c if (mddev->pers->hot_remove_disk( mddev 8860 drivers/md/md.c mddev, rdev) == 0) { mddev 8861 drivers/md/md.c sysfs_unlink_rdev(mddev, rdev); mddev 8871 drivers/md/md.c if (removed && mddev->kobj.sd) mddev 8872 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "degraded"); mddev 8877 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 8892 drivers/md/md.c if (mddev->ro && mddev 8899 drivers/md/md.c if (mddev->pers-> mddev 8900 drivers/md/md.c hot_add_disk(mddev, rdev) == 0) { mddev 8901 drivers/md/md.c if (sysfs_link_rdev(mddev, rdev)) mddev 8905 drivers/md/md.c md_new_event(mddev); mddev 8906 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 8911 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 8917 drivers/md/md.c struct mddev *mddev = container_of(ws, struct mddev, del_work); mddev 8919 drivers/md/md.c mddev->sync_thread = md_register_thread(md_do_sync, mddev 8920 drivers/md/md.c mddev, mddev 8922 drivers/md/md.c if (!mddev->sync_thread) { mddev 8924 drivers/md/md.c mdname(mddev)); mddev 8926 drivers/md/md.c clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); mddev 8927 drivers/md/md.c clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); mddev 8928 drivers/md/md.c clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); mddev 8929 drivers/md/md.c clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); mddev 8930 drivers/md/md.c clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev 8933 drivers/md/md.c &mddev->recovery)) mddev 8934 drivers/md/md.c if (mddev->sysfs_action) mddev 8935 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_action); mddev 8937 drivers/md/md.c md_wakeup_thread(mddev->sync_thread); mddev 8938 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_action); mddev 8939 drivers/md/md.c md_new_event(mddev); mddev 
8964 drivers/md/md.c void md_check_recovery(struct mddev *mddev) mddev 8966 drivers/md/md.c if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) { mddev 8970 drivers/md/md.c set_bit(MD_UPDATING_SB, &mddev->flags); mddev 8972 drivers/md/md.c if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags)) mddev 8973 drivers/md/md.c md_update_sb(mddev, 0); mddev 8974 drivers/md/md.c clear_bit_unlock(MD_UPDATING_SB, &mddev->flags); mddev 8975 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 8978 drivers/md/md.c if (mddev->suspended) mddev 8981 drivers/md/md.c if (mddev->bitmap) mddev 8982 drivers/md/md.c md_bitmap_daemon_work(mddev); mddev 8985 drivers/md/md.c if (mddev->pers->sync_request && !mddev->external) { mddev 8987 drivers/md/md.c mdname(mddev)); mddev 8988 drivers/md/md.c mddev->safemode = 2; mddev 8993 drivers/md/md.c if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) mddev 8996 drivers/md/md.c (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) || mddev 8997 drivers/md/md.c test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || mddev 8998 drivers/md/md.c test_bit(MD_RECOVERY_DONE, &mddev->recovery) || mddev 8999 drivers/md/md.c (mddev->external == 0 && mddev->safemode == 1) || mddev 9000 drivers/md/md.c (mddev->safemode == 2 mddev 9001 drivers/md/md.c && !mddev->in_sync && mddev->recovery_cp == MaxSector) mddev 9005 drivers/md/md.c if (mddev_trylock(mddev)) { mddev 9007 drivers/md/md.c bool try_set_sync = mddev->safemode != 0; mddev 9009 drivers/md/md.c if (!mddev->external && mddev->safemode == 1) mddev 9010 drivers/md/md.c mddev->safemode = 0; mddev 9012 drivers/md/md.c if (mddev->ro) { mddev 9014 drivers/md/md.c if (!mddev->external && mddev->in_sync) mddev 9020 drivers/md/md.c rdev_for_each(rdev, mddev) mddev 9029 drivers/md/md.c remove_and_add_spares(mddev, NULL); mddev 9033 drivers/md/md.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 9034 drivers/md/md.c md_reap_sync_thread(mddev); mddev 9035 drivers/md/md.c clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); mddev 9036 drivers/md/md.c clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 9037 drivers/md/md.c clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); mddev 9041 drivers/md/md.c if (mddev_is_clustered(mddev)) { mddev 9046 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 9053 drivers/md/md.c if (try_set_sync && !mddev->external && !mddev->in_sync) { mddev 9054 drivers/md/md.c spin_lock(&mddev->lock); mddev 9055 drivers/md/md.c set_in_sync(mddev); mddev 9056 drivers/md/md.c spin_unlock(&mddev->lock); mddev 9059 drivers/md/md.c if (mddev->sb_flags) mddev 9060 drivers/md/md.c md_update_sb(mddev, 0); mddev 9062 drivers/md/md.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && mddev 9063 drivers/md/md.c !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { mddev 9065 drivers/md/md.c clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 9068 drivers/md/md.c if (mddev->sync_thread) { mddev 9069 drivers/md/md.c md_reap_sync_thread(mddev); mddev 9075 drivers/md/md.c mddev->curr_resync_completed = 0; mddev 9076 drivers/md/md.c spin_lock(&mddev->lock); mddev 9077 drivers/md/md.c set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev 9078 drivers/md/md.c spin_unlock(&mddev->lock); mddev 9082 drivers/md/md.c clear_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 9083 drivers/md/md.c clear_bit(MD_RECOVERY_DONE, &mddev->recovery); mddev 9085 drivers/md/md.c if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || mddev 9086 drivers/md/md.c test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) mddev 9095 drivers/md/md.c if 
(mddev->reshape_position != MaxSector) { mddev 9096 drivers/md/md.c if (mddev->pers->check_reshape == NULL || mddev 9097 drivers/md/md.c mddev->pers->check_reshape(mddev) != 0) mddev 9100 drivers/md/md.c set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); mddev 9101 drivers/md/md.c clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); mddev 9102 drivers/md/md.c } else if ((spares = remove_and_add_spares(mddev, NULL))) { mddev 9103 drivers/md/md.c clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); mddev 9104 drivers/md/md.c clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); mddev 9105 drivers/md/md.c clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); mddev 9106 drivers/md/md.c set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); mddev 9107 drivers/md/md.c } else if (mddev->recovery_cp < MaxSector) { mddev 9108 drivers/md/md.c set_bit(MD_RECOVERY_SYNC, &mddev->recovery); mddev 9109 drivers/md/md.c clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); mddev 9110 drivers/md/md.c } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) mddev 9114 drivers/md/md.c if (mddev->pers->sync_request) { mddev 9120 drivers/md/md.c md_bitmap_write_all(mddev->bitmap); mddev 9122 drivers/md/md.c INIT_WORK(&mddev->del_work, md_start_sync); mddev 9123 drivers/md/md.c queue_work(md_misc_wq, &mddev->del_work); mddev 9127 drivers/md/md.c if (!mddev->sync_thread) { mddev 9128 drivers/md/md.c clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev 9131 drivers/md/md.c &mddev->recovery)) mddev 9132 drivers/md/md.c if (mddev->sysfs_action) mddev 9133 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_action); mddev 9136 drivers/md/md.c wake_up(&mddev->sb_wait); mddev 9137 drivers/md/md.c mddev_unlock(mddev); mddev 9142 drivers/md/md.c void md_reap_sync_thread(struct mddev *mddev) mddev 9145 drivers/md/md.c sector_t old_dev_sectors = mddev->dev_sectors; mddev 9149 drivers/md/md.c md_unregister_thread(&mddev->sync_thread); mddev 9150 drivers/md/md.c if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && mddev 9151 drivers/md/md.c !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && mddev 9152 drivers/md/md.c mddev->degraded != mddev->raid_disks) { mddev 9155 drivers/md/md.c if (mddev->pers->spare_active(mddev)) { mddev 9156 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, mddev 9158 drivers/md/md.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 9161 drivers/md/md.c if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 9162 drivers/md/md.c mddev->pers->finish_reshape) { mddev 9163 drivers/md/md.c mddev->pers->finish_reshape(mddev); mddev 9164 drivers/md/md.c if (mddev_is_clustered(mddev)) mddev 9171 drivers/md/md.c if (!mddev->degraded) mddev 9172 drivers/md/md.c rdev_for_each(rdev, mddev) mddev 9175 drivers/md/md.c md_update_sb(mddev, 1); mddev 9179 drivers/md/md.c if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) mddev 9180 drivers/md/md.c md_cluster_ops->resync_finish(mddev); mddev 9181 drivers/md/md.c clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev 9182 drivers/md/md.c clear_bit(MD_RECOVERY_DONE, &mddev->recovery); mddev 9183 drivers/md/md.c clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); mddev 9184 drivers/md/md.c clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); mddev 9185 drivers/md/md.c clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); mddev 9186 drivers/md/md.c clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); mddev 9192 drivers/md/md.c if (mddev_is_clustered(mddev) && is_reshaped mddev 9193 drivers/md/md.c && !test_bit(MD_CLOSING, &mddev->flags)) mddev 9194 drivers/md/md.c 
md_cluster_ops->update_size(mddev, old_dev_sectors); mddev 9197 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 9198 drivers/md/md.c sysfs_notify_dirent_safe(mddev->sysfs_action); mddev 9199 drivers/md/md.c md_new_event(mddev); mddev 9200 drivers/md/md.c if (mddev->event_work.func) mddev 9201 drivers/md/md.c queue_work(md_misc_wq, &mddev->event_work); mddev 9205 drivers/md/md.c void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) mddev 9212 drivers/md/md.c rdev_dec_pending(rdev, mddev); mddev 9216 drivers/md/md.c void md_finish_reshape(struct mddev *mddev) mddev 9221 drivers/md/md.c rdev_for_each(rdev, mddev) { mddev 9237 drivers/md/md.c struct mddev *mddev = rdev->mddev; mddev 9250 drivers/md/md.c set_mask_bits(&mddev->sb_flags, 0, mddev 9252 drivers/md/md.c md_wakeup_thread(rdev->mddev->thread); mddev 9278 drivers/md/md.c struct mddev *mddev; mddev 9281 drivers/md/md.c for_each_mddev(mddev, tmp) { mddev 9282 drivers/md/md.c if (mddev_trylock(mddev)) { mddev 9283 drivers/md/md.c if (mddev->pers) mddev 9284 drivers/md/md.c __md_stop_writes(mddev); mddev 9285 drivers/md/md.c if (mddev->persistent) mddev 9286 drivers/md/md.c mddev->safemode = 2; mddev 9287 drivers/md/md.c mddev_unlock(mddev); mddev 9356 drivers/md/md.c static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) mddev 9367 drivers/md/md.c if (mddev->dev_sectors != le64_to_cpu(sb->size)) { mddev 9368 drivers/md/md.c ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); mddev 9372 drivers/md/md.c md_bitmap_update_sb(mddev->bitmap); mddev 9376 drivers/md/md.c rdev_for_each(rdev2, mddev) { mddev 9401 drivers/md/md.c ret = remove_and_add_spares(mddev, rdev2); mddev 9406 drivers/md/md.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 9407 drivers/md/md.c md_wakeup_thread(mddev->thread); mddev 9415 drivers/md/md.c md_error(mddev, rdev2); mddev 9421 drivers/md/md.c if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) mddev 9422 drivers/md/md.c update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); mddev 9428 drivers/md/md.c if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && mddev 9434 drivers/md/md.c mddev->reshape_position = le64_to_cpu(sb->reshape_position); mddev 9435 drivers/md/md.c if (mddev->pers->update_reshape_pos) mddev 9436 drivers/md/md.c mddev->pers->update_reshape_pos(mddev); mddev 9437 drivers/md/md.c if (mddev->pers->start_reshape) mddev 9438 drivers/md/md.c mddev->pers->start_reshape(mddev); mddev 9439 drivers/md/md.c } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && mddev 9440 drivers/md/md.c mddev->reshape_position != MaxSector && mddev 9443 drivers/md/md.c mddev->reshape_position = MaxSector; mddev 9444 drivers/md/md.c if (mddev->pers->update_reshape_pos) mddev 9445 drivers/md/md.c mddev->pers->update_reshape_pos(mddev); mddev 9449 drivers/md/md.c mddev->events = le64_to_cpu(sb->events); mddev 9452 drivers/md/md.c static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) mddev 9466 drivers/md/md.c err = super_types[mddev->major_version]. 
mddev 9467 drivers/md/md.c load_super(rdev, NULL, mddev->minor_version); mddev 9492 drivers/md/md.c mddev->pers->spare_active(mddev)) mddev 9493 drivers/md/md.c sysfs_notify(&mddev->kobj, NULL, "degraded"); mddev 9499 drivers/md/md.c void md_reload_sb(struct mddev *mddev, int nr) mddev 9505 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) { mddev 9515 drivers/md/md.c err = read_rdev(mddev, rdev); mddev 9519 drivers/md/md.c check_sb_changes(mddev, rdev); mddev 9522 drivers/md/md.c rdev_for_each_rcu(rdev, mddev) { mddev 9524 drivers/md/md.c read_rdev(mddev, rdev); mddev 9600 drivers/md/md.c struct mddev *mddev; mddev 9624 drivers/md/md.c for_each_mddev(mddev, tmp) { mddev 9625 drivers/md/md.c export_array(mddev); mddev 9626 drivers/md/md.c mddev->ctime = 0; mddev 9627 drivers/md/md.c mddev->hold_active = 0; mddev 42 drivers/md/md.h struct mddev *mddev; /* RAID array if running */ mddev 491 drivers/md/md.h void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); mddev 518 drivers/md/md.h static inline int __must_check mddev_lock(struct mddev *mddev) mddev 520 drivers/md/md.h return mutex_lock_interruptible(&mddev->reconfig_mutex); mddev 526 drivers/md/md.h static inline void mddev_lock_nointr(struct mddev *mddev) mddev 528 drivers/md/md.h mutex_lock(&mddev->reconfig_mutex); mddev 531 drivers/md/md.h static inline int mddev_trylock(struct mddev *mddev) mddev 533 drivers/md/md.h return mutex_trylock(&mddev->reconfig_mutex); mddev 535 drivers/md/md.h extern void mddev_unlock(struct mddev *mddev); mddev 553 drivers/md/md.h bool __must_check (*make_request)(struct mddev *mddev, struct bio *bio); mddev 558 drivers/md/md.h int (*run)(struct mddev *mddev); mddev 560 drivers/md/md.h int (*start)(struct mddev *mddev); mddev 561 drivers/md/md.h void (*free)(struct mddev *mddev, void *priv); mddev 562 drivers/md/md.h void (*status)(struct seq_file *seq, struct mddev *mddev); mddev 566 drivers/md/md.h void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev); mddev 567 drivers/md/md.h int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev); mddev 568 drivers/md/md.h int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev); mddev 569 drivers/md/md.h int (*spare_active) (struct mddev *mddev); mddev 570 drivers/md/md.h sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped); mddev 571 drivers/md/md.h int (*resize) (struct mddev *mddev, sector_t sectors); mddev 572 drivers/md/md.h sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks); mddev 573 drivers/md/md.h int (*check_reshape) (struct mddev *mddev); mddev 574 drivers/md/md.h int (*start_reshape) (struct mddev *mddev); mddev 575 drivers/md/md.h void (*finish_reshape) (struct mddev *mddev); mddev 576 drivers/md/md.h void (*update_reshape_pos) (struct mddev *mddev); mddev 581 drivers/md/md.h void (*quiesce) (struct mddev *mddev, int quiesce); mddev 591 drivers/md/md.h void *(*takeover) (struct mddev *mddev); mddev 594 drivers/md/md.h int (*congested)(struct mddev *mddev, int bits); mddev 596 drivers/md/md.h int (*change_consistency_policy)(struct mddev *mddev, const char *buf); mddev 601 drivers/md/md.h ssize_t (*show)(struct mddev *, char *); mddev 602 drivers/md/md.h ssize_t (*store)(struct mddev *, const char *, size_t); mddev 618 drivers/md/md.h static inline char * mdname (struct mddev * mddev) mddev 620 drivers/md/md.h return mddev->gendisk ? 
mddev->gendisk->disk_name : "mdX"; mddev 623 drivers/md/md.h static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) mddev 628 drivers/md/md.h mddev->kobj.sd) { mddev 630 drivers/md/md.h return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); mddev 635 drivers/md/md.h static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) mddev 640 drivers/md/md.h mddev->kobj.sd) { mddev 642 drivers/md/md.h sysfs_remove_link(&mddev->kobj, nm); mddev 656 drivers/md/md.h #define rdev_for_each(rdev, mddev) \ mddev 657 drivers/md/md.h list_for_each_entry(rdev, &((mddev)->disks), same_set) mddev 659 drivers/md/md.h #define rdev_for_each_safe(rdev, tmp, mddev) \ mddev 660 drivers/md/md.h list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) mddev 662 drivers/md/md.h #define rdev_for_each_rcu(rdev, mddev) \ mddev 663 drivers/md/md.h list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) mddev 667 drivers/md/md.h struct mddev *mddev; mddev 687 drivers/md/md.h extern int md_setup_cluster(struct mddev *mddev, int nodes); mddev 688 drivers/md/md.h extern void md_cluster_stop(struct mddev *mddev); mddev 691 drivers/md/md.h struct mddev *mddev, mddev 695 drivers/md/md.h extern void md_check_recovery(struct mddev *mddev); mddev 696 drivers/md/md.h extern void md_reap_sync_thread(struct mddev *mddev); mddev 697 drivers/md/md.h extern int mddev_init_writes_pending(struct mddev *mddev); mddev 698 drivers/md/md.h extern bool md_write_start(struct mddev *mddev, struct bio *bi); mddev 699 drivers/md/md.h extern void md_write_inc(struct mddev *mddev, struct bio *bi); mddev 700 drivers/md/md.h extern void md_write_end(struct mddev *mddev); mddev 701 drivers/md/md.h extern void md_done_sync(struct mddev *mddev, int blocks, int ok); mddev 702 drivers/md/md.h extern void md_error(struct mddev *mddev, struct md_rdev *rdev); mddev 703 drivers/md/md.h extern void md_finish_reshape(struct mddev *mddev); mddev 705 drivers/md/md.h extern int mddev_congested(struct mddev *mddev, int bits); mddev 706 drivers/md/md.h extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); mddev 707 drivers/md/md.h extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev, mddev 709 drivers/md/md.h extern int md_super_wait(struct mddev *mddev); mddev 714 drivers/md/md.h extern void md_new_event(struct mddev *mddev); mddev 715 drivers/md/md.h extern void md_allow_write(struct mddev *mddev); mddev 716 drivers/md/md.h extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev); mddev 717 drivers/md/md.h extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors); mddev 718 drivers/md/md.h extern int md_check_no_bitmap(struct mddev *mddev); mddev 719 drivers/md/md.h extern int md_integrity_register(struct mddev *mddev); mddev 720 drivers/md/md.h extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev); mddev 723 drivers/md/md.h extern void mddev_init(struct mddev *mddev); mddev 724 drivers/md/md.h extern int md_run(struct mddev *mddev); mddev 725 drivers/md/md.h extern int md_start(struct mddev *mddev); mddev 726 drivers/md/md.h extern void md_stop(struct mddev *mddev); mddev 727 drivers/md/md.h extern void md_stop_writes(struct mddev *mddev); mddev 731 drivers/md/md.h extern void md_handle_request(struct mddev *mddev, struct bio *bio); mddev 732 drivers/md/md.h extern void mddev_suspend(struct mddev *mddev); mddev 733 drivers/md/md.h extern void mddev_resume(struct mddev *mddev); mddev 735 drivers/md/md.h 
struct mddev *mddev); mddev 737 drivers/md/md.h extern void md_reload_sb(struct mddev *mddev, int raid_disk); mddev 738 drivers/md/md.h extern void md_update_sb(struct mddev *mddev, int force); mddev 740 drivers/md/md.h extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev, mddev 742 drivers/md/md.h struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); mddev 743 drivers/md/md.h struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev); mddev 750 drivers/md/md.h if (!test_and_set_bit(MD_BROKEN, &rdev->mddev->flags)) mddev 752 drivers/md/md.h mdname(rdev->mddev), md_type); mddev 758 drivers/md/md.h static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) mddev 762 drivers/md/md.h set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 763 drivers/md/md.h md_wakeup_thread(mddev->thread); mddev 768 drivers/md/md.h static inline int mddev_is_clustered(struct mddev *mddev) mddev 770 drivers/md/md.h return mddev->cluster_info && mddev->bitmap_info.nodes > 1; mddev 774 drivers/md/md.h static inline void mddev_clear_unsupported_flags(struct mddev *mddev, mddev 777 drivers/md/md.h mddev->flags &= ~unsupported_flags; mddev 780 drivers/md/md.h static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio) mddev 784 drivers/md/md.h mddev->queue->limits.max_write_same_sectors = 0; mddev 787 drivers/md/md.h static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio) mddev 791 drivers/md/md.h mddev->queue->limits.max_write_zeroes_sectors = 0; mddev 32 drivers/md/raid0.c static int raid0_congested(struct mddev *mddev, int bits) mddev 34 drivers/md/raid0.c struct r0conf *conf = mddev->private; mddev 50 drivers/md/raid0.c static void dump_zones(struct mddev *mddev) mddev 56 drivers/md/raid0.c struct r0conf *conf = mddev->private; mddev 59 drivers/md/raid0.c mdname(mddev), mddev 80 drivers/md/raid0.c static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) mddev 95 drivers/md/raid0.c rdev_for_each(rdev1, mddev) { mddev 97 drivers/md/raid0.c mdname(mddev), mddev 103 drivers/md/raid0.c sector_div(sectors, mddev->chunk_sectors); mddev 104 drivers/md/raid0.c rdev1->sectors = sectors * mddev->chunk_sectors; mddev 109 drivers/md/raid0.c rdev_for_each(rdev2, mddev) { mddev 112 drivers/md/raid0.c mdname(mddev), mddev 119 drivers/md/raid0.c mdname(mddev)); mddev 128 drivers/md/raid0.c mdname(mddev)); mddev 133 drivers/md/raid0.c mdname(mddev)); mddev 137 drivers/md/raid0.c mdname(mddev)); mddev 140 drivers/md/raid0.c mdname(mddev), conf->nr_strip_zones); mddev 144 drivers/md/raid0.c mdname(mddev), conf->nr_strip_zones); mddev 148 drivers/md/raid0.c } else if (mddev->layout == RAID0_ORIG_LAYOUT || mddev 149 drivers/md/raid0.c mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { mddev 150 drivers/md/raid0.c conf->layout = mddev->layout; mddev 156 drivers/md/raid0.c mdname(mddev)); mddev 165 drivers/md/raid0.c if ((mddev->chunk_sectors << 9) % blksize) { mddev 167 drivers/md/raid0.c mdname(mddev), mddev 168 drivers/md/raid0.c mddev->chunk_sectors << 9, blksize); mddev 181 drivers/md/raid0.c mddev->raid_disks), mddev 194 drivers/md/raid0.c rdev_for_each(rdev1, mddev) { mddev 197 drivers/md/raid0.c if (mddev->level == 10) { mddev 203 drivers/md/raid0.c if (mddev->level == 1) { mddev 213 drivers/md/raid0.c mdname(mddev)); mddev 216 drivers/md/raid0.c if (j >= mddev->raid_disks) { mddev 218 drivers/md/raid0.c mdname(mddev), j); mddev 223 drivers/md/raid0.c mdname(mddev), j); mddev 232 drivers/md/raid0.c if (cnt 
!= mddev->raid_disks) { mddev 234 drivers/md/raid0.c mdname(mddev), cnt, mddev->raid_disks); mddev 248 drivers/md/raid0.c dev = conf->devlist + i * mddev->raid_disks; mddev 250 drivers/md/raid0.c pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i); mddev 259 drivers/md/raid0.c mdname(mddev), mddev 265 drivers/md/raid0.c mdname(mddev), mddev 272 drivers/md/raid0.c mdname(mddev), mddev 280 drivers/md/raid0.c mdname(mddev), mddev 287 drivers/md/raid0.c mdname(mddev), mddev 291 drivers/md/raid0.c pr_debug("md/raid0:%s: done.\n", mdname(mddev)); mddev 326 drivers/md/raid0.c static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone, mddev 331 drivers/md/raid0.c struct r0conf *conf = mddev->private; mddev 333 drivers/md/raid0.c unsigned int chunk_sects = mddev->chunk_sectors; mddev 359 drivers/md/raid0.c static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) mddev 367 drivers/md/raid0.c rdev_for_each(rdev, mddev) mddev 369 drivers/md/raid0.c ~(sector_t)(mddev->chunk_sectors-1)); mddev 374 drivers/md/raid0.c static void raid0_free(struct mddev *mddev, void *priv); mddev 376 drivers/md/raid0.c static int raid0_run(struct mddev *mddev) mddev 381 drivers/md/raid0.c if (mddev->chunk_sectors == 0) { mddev 382 drivers/md/raid0.c pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev)); mddev 385 drivers/md/raid0.c if (md_check_no_bitmap(mddev)) mddev 389 drivers/md/raid0.c if (mddev->private == NULL) { mddev 390 drivers/md/raid0.c ret = create_strip_zones(mddev, &conf); mddev 393 drivers/md/raid0.c mddev->private = conf; mddev 395 drivers/md/raid0.c conf = mddev->private; mddev 396 drivers/md/raid0.c if (mddev->queue) { mddev 400 drivers/md/raid0.c blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); mddev 401 drivers/md/raid0.c blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); mddev 402 drivers/md/raid0.c blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); mddev 403 drivers/md/raid0.c blk_queue_max_discard_sectors(mddev->queue, UINT_MAX); mddev 405 drivers/md/raid0.c blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); mddev 406 drivers/md/raid0.c blk_queue_io_opt(mddev->queue, mddev 407 drivers/md/raid0.c (mddev->chunk_sectors << 9) * mddev->raid_disks); mddev 409 drivers/md/raid0.c rdev_for_each(rdev, mddev) { mddev 410 drivers/md/raid0.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 416 drivers/md/raid0.c blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); mddev 418 drivers/md/raid0.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); mddev 422 drivers/md/raid0.c md_set_array_sectors(mddev, raid0_size(mddev, 0, 0)); mddev 425 drivers/md/raid0.c mdname(mddev), mddev 426 drivers/md/raid0.c (unsigned long long)mddev->array_sectors); mddev 428 drivers/md/raid0.c if (mddev->queue) { mddev 438 drivers/md/raid0.c int stripe = mddev->raid_disks * mddev 439 drivers/md/raid0.c (mddev->chunk_sectors << 9) / PAGE_SIZE; mddev 440 drivers/md/raid0.c if (mddev->queue->backing_dev_info->ra_pages < 2* stripe) mddev 441 drivers/md/raid0.c mddev->queue->backing_dev_info->ra_pages = 2* stripe; mddev 444 drivers/md/raid0.c dump_zones(mddev); mddev 446 drivers/md/raid0.c ret = md_integrity_register(mddev); mddev 451 drivers/md/raid0.c static void raid0_free(struct mddev *mddev, void *priv) mddev 463 drivers/md/raid0.c static inline int is_io_in_chunk_boundary(struct mddev *mddev, mddev 477 drivers/md/raid0.c static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) mddev 479 
drivers/md/raid0.c struct r0conf *conf = mddev->private; mddev 496 drivers/md/raid0.c &mddev->bio_set); mddev 508 drivers/md/raid0.c stripe_size = zone->nb_dev * mddev->chunk_sectors; mddev 516 drivers/md/raid0.c mddev->chunk_sectors; mddev 518 drivers/md/raid0.c mddev->chunk_sectors) + mddev 519 drivers/md/raid0.c first_stripe_index * mddev->chunk_sectors; mddev 521 drivers/md/raid0.c mddev->chunk_sectors; mddev 523 drivers/md/raid0.c mddev->chunk_sectors) + mddev 524 drivers/md/raid0.c last_stripe_index * mddev->chunk_sectors; mddev 533 drivers/md/raid0.c mddev->chunk_sectors; mddev 535 drivers/md/raid0.c dev_start = first_stripe_index * mddev->chunk_sectors; mddev 540 drivers/md/raid0.c dev_end = (last_stripe_index + 1) * mddev->chunk_sectors; mddev 542 drivers/md/raid0.c dev_end = last_stripe_index * mddev->chunk_sectors; mddev 558 drivers/md/raid0.c if (mddev->gendisk) mddev 560 drivers/md/raid0.c discard_bio, disk_devt(mddev->gendisk), mddev 567 drivers/md/raid0.c static bool raid0_make_request(struct mddev *mddev, struct bio *bio) mddev 569 drivers/md/raid0.c struct r0conf *conf = mddev->private; mddev 579 drivers/md/raid0.c && md_flush_request(mddev, bio)) mddev 583 drivers/md/raid0.c raid0_handle_discard(mddev, bio); mddev 589 drivers/md/raid0.c chunk_sects = mddev->chunk_sectors; mddev 601 drivers/md/raid0.c &mddev->bio_set); mddev 608 drivers/md/raid0.c zone = find_zone(mddev->private, &sector); mddev 611 drivers/md/raid0.c tmp_dev = map_sector(mddev, zone, orig_sector, &sector); mddev 614 drivers/md/raid0.c tmp_dev = map_sector(mddev, zone, sector, &sector); mddev 617 drivers/md/raid0.c WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev)); mddev 631 drivers/md/raid0.c if (mddev->gendisk) mddev 633 drivers/md/raid0.c disk_devt(mddev->gendisk), bio_sector); mddev 634 drivers/md/raid0.c mddev_check_writesame(mddev, bio); mddev 635 drivers/md/raid0.c mddev_check_write_zeroes(mddev, bio); mddev 640 drivers/md/raid0.c static void raid0_status(struct seq_file *seq, struct mddev *mddev) mddev 642 drivers/md/raid0.c seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2); mddev 646 drivers/md/raid0.c static void *raid0_takeover_raid45(struct mddev *mddev) mddev 651 drivers/md/raid0.c if (mddev->degraded != 1) { mddev 653 drivers/md/raid0.c mdname(mddev), mddev 654 drivers/md/raid0.c mddev->degraded); mddev 658 drivers/md/raid0.c rdev_for_each(rdev, mddev) { mddev 660 drivers/md/raid0.c if (rdev->raid_disk == mddev->raid_disks-1) { mddev 662 drivers/md/raid0.c mdname(mddev)); mddev 665 drivers/md/raid0.c rdev->sectors = mddev->dev_sectors; mddev 669 drivers/md/raid0.c mddev->new_level = 0; mddev 670 drivers/md/raid0.c mddev->new_layout = 0; mddev 671 drivers/md/raid0.c mddev->new_chunk_sectors = mddev->chunk_sectors; mddev 672 drivers/md/raid0.c mddev->raid_disks--; mddev 673 drivers/md/raid0.c mddev->delta_disks = -1; mddev 675 drivers/md/raid0.c mddev->recovery_cp = MaxSector; mddev 676 drivers/md/raid0.c mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); mddev 678 drivers/md/raid0.c create_strip_zones(mddev, &priv_conf); mddev 683 drivers/md/raid0.c static void *raid0_takeover_raid10(struct mddev *mddev) mddev 693 drivers/md/raid0.c if (mddev->layout != ((1 << 8) + 2)) { mddev 695 drivers/md/raid0.c mdname(mddev), mddev 696 drivers/md/raid0.c mddev->layout); mddev 699 drivers/md/raid0.c if (mddev->raid_disks & 1) { mddev 701 drivers/md/raid0.c mdname(mddev)); mddev 704 drivers/md/raid0.c if (mddev->degraded != (mddev->raid_disks>>1)) { mddev 706 drivers/md/raid0.c mdname(mddev));
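
The raid0.c occurrences above cluster around find_zone() (line 608) and map_sector() (lines 326, 611, 614), which turn an array-relative sector into a (device, device-relative sector) pair using mddev->chunk_sectors. As a rough illustration of that striping arithmetic only — a hypothetical user-space model, not the kernel code, which additionally handles multiple zones, non-power-of-two chunk sizes via sector_div(), and the RAID0_ORIG_LAYOUT/RAID0_ALT_MULTIZONE_LAYOUT distinction — the single-zone case reduces to:

#include <stdio.h>
#include <stdint.h>

/* Map an array-relative sector to (device index, device-relative sector)
 * for one zone of nr_devs equally sized devices. */
static void map_sector_model(uint64_t sector, uint32_t chunk_sects,
                             uint32_t nr_devs,
                             uint32_t *dev, uint64_t *dev_sector)
{
        uint64_t chunk  = sector / chunk_sects;  /* which chunk of the array */
        uint64_t offset = sector % chunk_sects;  /* offset inside that chunk */

        *dev = chunk % nr_devs;                  /* chunks rotate across devices */
        *dev_sector = (chunk / nr_devs) * chunk_sects + offset;
}

int main(void)
{
        uint32_t dev;
        uint64_t dev_sector;

        /* 512 KiB chunks (1024 sectors) striped over 4 devices. */
        map_sector_model(5000, 1024, 4, &dev, &dev_sector);
        printf("sector 5000 -> device %u, sector %llu\n",
               dev, (unsigned long long)dev_sector);
        return 0;
}

With 1024-sector chunks on four devices, array sector 5000 falls in chunk 4 at offset 904, so the model prints device 0, sector 1928.
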
mddev 711 drivers/md/raid0.c mddev->new_level = 0; mddev 712 drivers/md/raid0.c mddev->new_layout = 0; mddev 713 drivers/md/raid0.c mddev->new_chunk_sectors = mddev->chunk_sectors; mddev 714 drivers/md/raid0.c mddev->delta_disks = - mddev->raid_disks / 2; mddev 715 drivers/md/raid0.c mddev->raid_disks += mddev->delta_disks; mddev 716 drivers/md/raid0.c mddev->degraded = 0; mddev 718 drivers/md/raid0.c mddev->recovery_cp = MaxSector; mddev 719 drivers/md/raid0.c mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); mddev 721 drivers/md/raid0.c create_strip_zones(mddev, &priv_conf); mddev 725 drivers/md/raid0.c static void *raid0_takeover_raid1(struct mddev *mddev) mddev 733 drivers/md/raid0.c if ((mddev->raid_disks - 1) != mddev->degraded) { mddev 735 drivers/md/raid0.c mdname(mddev)); mddev 746 drivers/md/raid0.c while (chunksect && (mddev->array_sectors & (chunksect - 1))) mddev 754 drivers/md/raid0.c mddev->new_level = 0; mddev 755 drivers/md/raid0.c mddev->new_layout = 0; mddev 756 drivers/md/raid0.c mddev->new_chunk_sectors = chunksect; mddev 757 drivers/md/raid0.c mddev->chunk_sectors = chunksect; mddev 758 drivers/md/raid0.c mddev->delta_disks = 1 - mddev->raid_disks; mddev 759 drivers/md/raid0.c mddev->raid_disks = 1; mddev 761 drivers/md/raid0.c mddev->recovery_cp = MaxSector; mddev 762 drivers/md/raid0.c mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS); mddev 764 drivers/md/raid0.c create_strip_zones(mddev, &priv_conf); mddev 768 drivers/md/raid0.c static void *raid0_takeover(struct mddev *mddev) mddev 777 drivers/md/raid0.c if (mddev->bitmap) { mddev 779 drivers/md/raid0.c mdname(mddev)); mddev 782 drivers/md/raid0.c if (mddev->level == 4) mddev 783 drivers/md/raid0.c return raid0_takeover_raid45(mddev); mddev 785 drivers/md/raid0.c if (mddev->level == 5) { mddev 786 drivers/md/raid0.c if (mddev->layout == ALGORITHM_PARITY_N) mddev 787 drivers/md/raid0.c return raid0_takeover_raid45(mddev); mddev 790 drivers/md/raid0.c mdname(mddev), ALGORITHM_PARITY_N); mddev 793 drivers/md/raid0.c if (mddev->level == 10) mddev 794 drivers/md/raid0.c return raid0_takeover_raid10(mddev); mddev 796 drivers/md/raid0.c if (mddev->level == 1) mddev 797 drivers/md/raid0.c return raid0_takeover_raid1(mddev); mddev 800 drivers/md/raid0.c mddev->level); mddev 805 drivers/md/raid0.c static void raid0_quiesce(struct mddev *mddev, int quiesce) mddev 58 drivers/md/raid1.c struct mddev *mddev = rdev->mddev; mddev 60 drivers/md/raid1.c wi = mempool_alloc(mddev->wb_info_pool, GFP_NOIO); mddev 76 drivers/md/raid1.c mempool_free(wi, mddev->wb_info_pool); mddev 87 drivers/md/raid1.c struct mddev *mddev = rdev->mddev; mddev 93 drivers/md/raid1.c mempool_free(wi, mddev->wb_info_pool); mddev 162 drivers/md/raid1.c if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) mddev 234 drivers/md/raid1.c struct r1conf *conf = r1_bio->mddev->private; mddev 242 drivers/md/raid1.c struct r1conf *conf = r1_bio->mddev->private; mddev 249 drivers/md/raid1.c rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); mddev 260 drivers/md/raid1.c struct mddev *mddev = r1_bio->mddev; mddev 261 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 271 drivers/md/raid1.c md_wakeup_thread(mddev->thread); mddev 282 drivers/md/raid1.c struct r1conf *conf = r1_bio->mddev->private; mddev 316 drivers/md/raid1.c struct r1conf *conf = r1_bio->mddev->private; mddev 328 drivers/md/raid1.c struct r1conf *conf = r1_bio->mddev->private; mddev 345 drivers/md/raid1.c struct r1conf *conf = r1_bio->mddev->private; mddev 
367 drivers/md/raid1.c if (r1_bio->mddev->degraded == conf->raid_disks || mddev 368 drivers/md/raid1.c (r1_bio->mddev->degraded == conf->raid_disks-1 && mddev 376 drivers/md/raid1.c rdev_dec_pending(rdev, conf->mddev); mddev 383 drivers/md/raid1.c mdname(conf->mddev), mddev 401 drivers/md/raid1.c md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, mddev 405 drivers/md/raid1.c md_write_end(r1_bio->mddev); mddev 428 drivers/md/raid1.c struct r1conf *conf = r1_bio->mddev->private; mddev 443 drivers/md/raid1.c conf->mddev->recovery); mddev 449 drivers/md/raid1.c md_error(r1_bio->mddev, rdev); mddev 532 drivers/md/raid1.c rdev_dec_pending(rdev, conf->mddev); mddev 609 drivers/md/raid1.c if ((conf->mddev->recovery_cp < this_sector + sectors) || mddev 610 drivers/md/raid1.c (mddev_is_clustered(conf->mddev) && mddev 611 drivers/md/raid1.c md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, mddev 772 drivers/md/raid1.c static int raid1_congested(struct mddev *mddev, int bits) mddev 774 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 805 drivers/md/raid1.c md_bitmap_unplug(conf->mddev->bitmap); mddev 915 drivers/md/raid1.c test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery), mddev 918 drivers/md/raid1.c if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { mddev 1094 drivers/md/raid1.c raid1_log(conf->mddev, "wait freeze"); mddev 1119 drivers/md/raid1.c behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); mddev 1169 drivers/md/raid1.c struct mddev *mddev = plug->cb.data; mddev 1170 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1179 drivers/md/raid1.c md_wakeup_thread(mddev->thread); mddev 1190 drivers/md/raid1.c static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio) mddev 1195 drivers/md/raid1.c r1_bio->mddev = mddev; mddev 1200 drivers/md/raid1.c alloc_r1bio(struct mddev *mddev, struct bio *bio) mddev 1202 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1208 drivers/md/raid1.c init_r1bio(r1_bio, mddev, bio); mddev 1212 drivers/md/raid1.c static void raid1_read_request(struct mddev *mddev, struct bio *bio, mddev 1215 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1218 drivers/md/raid1.c struct bitmap *bitmap = mddev->bitmap; mddev 1252 drivers/md/raid1.c r1_bio = alloc_r1bio(mddev, bio); mddev 1254 drivers/md/raid1.c init_r1bio(r1_bio, mddev, bio); mddev 1267 drivers/md/raid1.c mdname(mddev), mddev 1278 drivers/md/raid1.c mdname(mddev), mddev 1288 drivers/md/raid1.c raid1_log(mddev, "wait behind writes"); mddev 1305 drivers/md/raid1.c read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); mddev 1319 drivers/md/raid1.c if (mddev->gendisk) mddev 1321 drivers/md/raid1.c disk_devt(mddev->gendisk), r1_bio->sector); mddev 1326 drivers/md/raid1.c static void raid1_write_request(struct mddev *mddev, struct bio *bio, mddev 1329 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1332 drivers/md/raid1.c struct bitmap *bitmap = mddev->bitmap; mddev 1340 drivers/md/raid1.c if (mddev_is_clustered(mddev) && mddev 1341 drivers/md/raid1.c md_cluster_ops->area_resyncing(mddev, WRITE, mddev 1348 drivers/md/raid1.c if (!md_cluster_ops->area_resyncing(mddev, WRITE, mddev 1364 drivers/md/raid1.c r1_bio = alloc_r1bio(mddev, bio); mddev 1368 drivers/md/raid1.c md_wakeup_thread(mddev->thread); mddev 1369 drivers/md/raid1.c raid1_log(mddev, "wait queued"); mddev 1426 drivers/md/raid1.c rdev_dec_pending(rdev, mddev); mddev 1455 drivers/md/raid1.c rdev_dec_pending(conf->mirrors[j].rdev, mddev); 
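The raid1_write_request() entries indexed around here show the core RAID1 write semantics: the incoming bio is cloned once per mirror (bio_clone_fast() against mddev->bio_set, lines 1503-1505 below) and submitted to every rdev. A minimal user-space sketch of that fan-out, with plain file descriptors standing in for the member devices and the return count standing in for the r1_bio completion state (all names here are illustrative, not kernel API):

/* Sketch: duplicate one 512-byte "sector" write to every mirror. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

static int raid1_write_sketch(const int *fds, int nmirrors,
			      const void *buf, size_t len, off_t sector)
{
	int ok = 0;

	for (int i = 0; i < nmirrors; i++)
		if (pwrite(fds[i], buf, len, sector * 512) == (ssize_t)len)
			ok++;	/* the kernel tracks this via r1_bio/end_io */
	return ok;
}

int main(void)
{
	int fds[2] = {
		open("/tmp/mirror0.img", O_CREAT | O_RDWR, 0600),
		open("/tmp/mirror1.img", O_CREAT | O_RDWR, 0600),
	};
	char block[512];

	memset(block, 0xab, sizeof(block));
	printf("wrote %d mirrors\n",
	       raid1_write_sketch(fds, 2, block, sizeof(block), 8));
	return 0;
}

Reads, by contrast, pick a single mirror (read_balance, gated by the recovery_cp and area_resyncing checks at lines 609-611 above), which is why the write path touches every rdev while the read path touches one.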
mddev 1458 drivers/md/raid1.c raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); mddev 1459 drivers/md/raid1.c md_wait_for_blocked_rdev(blocked_rdev, mddev); mddev 1491 drivers/md/raid1.c < mddev->bitmap_info.max_write_behind) && mddev 1503 drivers/md/raid1.c GFP_NOIO, &mddev->bio_set); mddev 1505 drivers/md/raid1.c mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); mddev 1530 drivers/md/raid1.c conf->raid_disks - mddev->degraded > 1) mddev 1536 drivers/md/raid1.c if (mddev->gendisk) mddev 1538 drivers/md/raid1.c mbio, disk_devt(mddev->gendisk), mddev 1543 drivers/md/raid1.c cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); mddev 1556 drivers/md/raid1.c md_wakeup_thread(mddev->thread); mddev 1566 drivers/md/raid1.c static bool raid1_make_request(struct mddev *mddev, struct bio *bio) mddev 1571 drivers/md/raid1.c && md_flush_request(mddev, bio)) mddev 1585 drivers/md/raid1.c raid1_read_request(mddev, bio, sectors, NULL); mddev 1587 drivers/md/raid1.c if (!md_write_start(mddev,bio)) mddev 1589 drivers/md/raid1.c raid1_write_request(mddev, bio, sectors); mddev 1594 drivers/md/raid1.c static void raid1_status(struct seq_file *seq, struct mddev *mddev) mddev 1596 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1600 drivers/md/raid1.c conf->raid_disks - mddev->degraded); mddev 1611 drivers/md/raid1.c static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) mddev 1614 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1624 drivers/md/raid1.c if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev mddev 1625 drivers/md/raid1.c && (conf->raid_disks - mddev->degraded) == 1) { mddev 1632 drivers/md/raid1.c conf->recovery_disabled = mddev->recovery_disabled; mddev 1638 drivers/md/raid1.c mddev->degraded++; mddev 1644 drivers/md/raid1.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 1645 drivers/md/raid1.c set_mask_bits(&mddev->sb_flags, 0, mddev 1649 drivers/md/raid1.c mdname(mddev), bdevname(rdev->bdev, b), mddev 1650 drivers/md/raid1.c mdname(mddev), conf->raid_disks - mddev->degraded); mddev 1662 drivers/md/raid1.c pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, mddev 1690 drivers/md/raid1.c static int raid1_spare_active(struct mddev *mddev) mddev 1693 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1735 drivers/md/raid1.c mddev->degraded -= count; mddev 1742 drivers/md/raid1.c static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) mddev 1744 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1751 drivers/md/raid1.c if (mddev->recovery_disabled == conf->recovery_disabled) mddev 1754 drivers/md/raid1.c if (md_integrity_add_rdev(rdev, mddev)) mddev 1773 drivers/md/raid1.c if (mddev->gendisk) mddev 1774 drivers/md/raid1.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 1800 drivers/md/raid1.c if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) mddev 1801 drivers/md/raid1.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); mddev 1806 drivers/md/raid1.c static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) mddev 1808 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1827 drivers/md/raid1.c mddev->recovery_disabled != conf->recovery_disabled && mddev 1828 drivers/md/raid1.c mddev->degraded < conf->raid_disks) { mddev 1868 drivers/md/raid1.c err = md_integrity_register(mddev); mddev 1894 drivers/md/raid1.c static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) mddev 1902 drivers/md/raid1.c 
md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); mddev 1911 drivers/md/raid1.c struct mddev *mddev = r1_bio->mddev; mddev 1919 drivers/md/raid1.c md_done_sync(mddev, s, uptodate); mddev 1928 drivers/md/raid1.c struct mddev *mddev = r1_bio->mddev; mddev 1929 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1935 drivers/md/raid1.c abort_sync_write(mddev, r1_bio); mddev 1939 drivers/md/raid1.c mddev->recovery); mddev 1964 drivers/md/raid1.c rdev->mddev->recovery); mddev 1968 drivers/md/raid1.c md_error(rdev->mddev, rdev); mddev 1985 drivers/md/raid1.c struct mddev *mddev = r1_bio->mddev; mddev 1986 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 1998 drivers/md/raid1.c md_error(mddev, rdev); mddev 2042 drivers/md/raid1.c mdname(mddev), bio_devname(bio, b), mddev 2053 drivers/md/raid1.c mddev->recovery_disabled; mddev 2054 drivers/md/raid1.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 2055 drivers/md/raid1.c md_done_sync(mddev, r1_bio->sectors, 0); mddev 2079 drivers/md/raid1.c rdev_dec_pending(rdev, mddev); mddev 2113 drivers/md/raid1.c struct mddev *mddev = r1_bio->mddev; mddev 2114 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 2145 drivers/md/raid1.c rdev_dec_pending(conf->mirrors[primary].rdev, mddev); mddev 2178 drivers/md/raid1.c atomic64_add(r1_bio->sectors, &mddev->resync_mismatches); mddev 2179 drivers/md/raid1.c if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) mddev 2183 drivers/md/raid1.c rdev_dec_pending(conf->mirrors[i].rdev, mddev); mddev 2191 drivers/md/raid1.c static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) mddev 2193 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 2203 drivers/md/raid1.c if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) mddev 2215 drivers/md/raid1.c !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) mddev 2218 drivers/md/raid1.c abort_sync_write(mddev, r1_bio); mddev 2247 drivers/md/raid1.c struct mddev *mddev = conf->mddev; mddev 2275 drivers/md/raid1.c rdev_dec_pending(rdev, mddev); mddev 2289 drivers/md/raid1.c md_error(mddev, rdev); mddev 2306 drivers/md/raid1.c rdev_dec_pending(rdev, mddev); mddev 2326 drivers/md/raid1.c mdname(mddev), s, mddev 2331 drivers/md/raid1.c rdev_dec_pending(rdev, mddev); mddev 2342 drivers/md/raid1.c struct mddev *mddev = r1_bio->mddev; mddev 2343 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 2382 drivers/md/raid1.c &mddev->bio_set); mddev 2385 drivers/md/raid1.c &mddev->bio_set); mddev 2426 drivers/md/raid1.c md_error(conf->mddev, rdev); mddev 2430 drivers/md/raid1.c md_done_sync(conf->mddev, s, 1); mddev 2444 drivers/md/raid1.c rdev_dec_pending(rdev, conf->mddev); mddev 2452 drivers/md/raid1.c md_error(conf->mddev, mddev 2458 drivers/md/raid1.c conf->mddev); mddev 2471 drivers/md/raid1.c md_wakeup_thread(conf->mddev->thread); mddev 2481 drivers/md/raid1.c struct mddev *mddev = conf->mddev; mddev 2500 drivers/md/raid1.c if (mddev->ro == 0 mddev 2506 drivers/md/raid1.c } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { mddev 2507 drivers/md/raid1.c md_error(mddev, rdev); mddev 2512 drivers/md/raid1.c rdev_dec_pending(rdev, conf->mddev); mddev 2518 drivers/md/raid1.c raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); mddev 2523 drivers/md/raid1.c struct mddev *mddev = thread->mddev; mddev 2526 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 2531 drivers/md/raid1.c md_check_recovery(mddev); mddev 2534 drivers/md/raid1.c !test_bit(MD_SB_CHANGE_PENDING, 
&mddev->sb_flags)) { mddev 2537 drivers/md/raid1.c if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) mddev 2546 drivers/md/raid1.c if (mddev->degraded) mddev 2570 drivers/md/raid1.c mddev = r1_bio->mddev; mddev 2571 drivers/md/raid1.c conf = mddev->private; mddev 2577 drivers/md/raid1.c sync_request_write(mddev, r1_bio); mddev 2587 drivers/md/raid1.c if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) mddev 2588 drivers/md/raid1.c md_check_recovery(mddev); mddev 2631 drivers/md/raid1.c static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, mddev 2634 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 2653 drivers/md/raid1.c max_sector = mddev->dev_sectors; mddev 2660 drivers/md/raid1.c if (mddev->curr_resync < max_sector) /* aborted */ mddev 2661 drivers/md/raid1.c md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, mddev 2666 drivers/md/raid1.c md_bitmap_close_sync(mddev->bitmap); mddev 2669 drivers/md/raid1.c if (mddev_is_clustered(mddev)) { mddev 2676 drivers/md/raid1.c if (mddev->bitmap == NULL && mddev 2677 drivers/md/raid1.c mddev->recovery_cp == MaxSector && mddev 2678 drivers/md/raid1.c !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && mddev 2686 drivers/md/raid1.c if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && mddev 2687 drivers/md/raid1.c !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { mddev 2704 drivers/md/raid1.c md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, mddev 2705 drivers/md/raid1.c mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high)); mddev 2723 drivers/md/raid1.c r1_bio->mddev = mddev; mddev 2771 drivers/md/raid1.c test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && mddev 2772 drivers/md/raid1.c !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { mddev 2809 drivers/md/raid1.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 2819 drivers/md/raid1.c conf->recovery_disabled = mddev->recovery_disabled; mddev 2820 drivers/md/raid1.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 2832 drivers/md/raid1.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0) mddev 2849 drivers/md/raid1.c if (max_sector > mddev->resync_max) mddev 2850 drivers/md/raid1.c max_sector = mddev->resync_max; /* Don't do IO beyond here */ mddev 2863 drivers/md/raid1.c if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, mddev 2866 drivers/md/raid1.c !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) mddev 2894 drivers/md/raid1.c if (mddev_is_clustered(mddev) && mddev 2896 drivers/md/raid1.c conf->cluster_sync_low = mddev->curr_resync_completed; mddev 2899 drivers/md/raid1.c md_cluster_ops->resync_info_update(mddev, mddev 2907 drivers/md/raid1.c if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { mddev 2930 drivers/md/raid1.c static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) mddev 2935 drivers/md/raid1.c return mddev->dev_sectors; mddev 2938 drivers/md/raid1.c static struct r1conf *setup_conf(struct mddev *mddev) mddev 2971 drivers/md/raid1.c mddev->raid_disks, 2), mddev 2983 drivers/md/raid1.c conf->poolinfo->raid_disks = mddev->raid_disks * 2; mddev 2993 drivers/md/raid1.c conf->poolinfo->mddev = mddev; mddev 2997 drivers/md/raid1.c rdev_for_each(rdev, mddev) { mddev 2999 drivers/md/raid1.c if (disk_idx >= mddev->raid_disks mddev 3003 drivers/md/raid1.c disk = conf->mirrors + mddev->raid_disks + disk_idx; mddev 3013 drivers/md/raid1.c conf->raid_disks = mddev->raid_disks; mddev 3014 drivers/md/raid1.c conf->mddev = 
mddev; mddev 3023 drivers/md/raid1.c conf->recovery_disabled = mddev->recovery_disabled - 1; mddev 3055 drivers/md/raid1.c conf->thread = md_register_thread(raid1d, mddev, "raid1"); mddev 3077 drivers/md/raid1.c static void raid1_free(struct mddev *mddev, void *priv); mddev 3078 drivers/md/raid1.c static int raid1_run(struct mddev *mddev) mddev 3086 drivers/md/raid1.c if (mddev->level != 1) { mddev 3088 drivers/md/raid1.c mdname(mddev), mddev->level); mddev 3091 drivers/md/raid1.c if (mddev->reshape_position != MaxSector) { mddev 3093 drivers/md/raid1.c mdname(mddev)); mddev 3096 drivers/md/raid1.c if (mddev_init_writes_pending(mddev) < 0) mddev 3103 drivers/md/raid1.c if (mddev->private == NULL) mddev 3104 drivers/md/raid1.c conf = setup_conf(mddev); mddev 3106 drivers/md/raid1.c conf = mddev->private; mddev 3111 drivers/md/raid1.c if (mddev->queue) { mddev 3112 drivers/md/raid1.c blk_queue_max_write_same_sectors(mddev->queue, 0); mddev 3113 drivers/md/raid1.c blk_queue_max_write_zeroes_sectors(mddev->queue, 0); mddev 3116 drivers/md/raid1.c rdev_for_each(rdev, mddev) { mddev 3117 drivers/md/raid1.c if (!mddev->gendisk) mddev 3119 drivers/md/raid1.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 3125 drivers/md/raid1.c mddev->degraded = 0; mddev 3130 drivers/md/raid1.c mddev->degraded++; mddev 3134 drivers/md/raid1.c if (conf->raid_disks - mddev->degraded < 1) { mddev 3139 drivers/md/raid1.c if (conf->raid_disks - mddev->degraded == 1) mddev 3140 drivers/md/raid1.c mddev->recovery_cp = MaxSector; mddev 3142 drivers/md/raid1.c if (mddev->recovery_cp != MaxSector) mddev 3144 drivers/md/raid1.c mdname(mddev)); mddev 3146 drivers/md/raid1.c mdname(mddev), mddev->raid_disks - mddev->degraded, mddev 3147 drivers/md/raid1.c mddev->raid_disks); mddev 3152 drivers/md/raid1.c mddev->thread = conf->thread; mddev 3154 drivers/md/raid1.c mddev->private = conf; mddev 3155 drivers/md/raid1.c set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); mddev 3157 drivers/md/raid1.c md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); mddev 3159 drivers/md/raid1.c if (mddev->queue) { mddev 3162 drivers/md/raid1.c mddev->queue); mddev 3165 drivers/md/raid1.c mddev->queue); mddev 3168 drivers/md/raid1.c ret = md_integrity_register(mddev); mddev 3170 drivers/md/raid1.c md_unregister_thread(&mddev->thread); mddev 3176 drivers/md/raid1.c raid1_free(mddev, conf); mddev 3180 drivers/md/raid1.c static void raid1_free(struct mddev *mddev, void *priv) mddev 3196 drivers/md/raid1.c static int raid1_resize(struct mddev *mddev, sector_t sectors) mddev 3205 drivers/md/raid1.c sector_t newsize = raid1_size(mddev, sectors, 0); mddev 3206 drivers/md/raid1.c if (mddev->external_size && mddev 3207 drivers/md/raid1.c mddev->array_sectors > newsize) mddev 3209 drivers/md/raid1.c if (mddev->bitmap) { mddev 3210 drivers/md/raid1.c int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); mddev 3214 drivers/md/raid1.c md_set_array_sectors(mddev, newsize); mddev 3215 drivers/md/raid1.c if (sectors > mddev->dev_sectors && mddev 3216 drivers/md/raid1.c mddev->recovery_cp > mddev->dev_sectors) { mddev 3217 drivers/md/raid1.c mddev->recovery_cp = mddev->dev_sectors; mddev 3218 drivers/md/raid1.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 3220 drivers/md/raid1.c mddev->dev_sectors = sectors; mddev 3221 drivers/md/raid1.c mddev->resync_max_sectors = sectors; mddev 3225 drivers/md/raid1.c static int raid1_reshape(struct mddev *mddev) mddev 3241 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 3251 drivers/md/raid1.c if 
(mddev->chunk_sectors != mddev->new_chunk_sectors || mddev 3252 drivers/md/raid1.c mddev->layout != mddev->new_layout || mddev 3253 drivers/md/raid1.c mddev->level != mddev->new_level) { mddev 3254 drivers/md/raid1.c mddev->new_chunk_sectors = mddev->chunk_sectors; mddev 3255 drivers/md/raid1.c mddev->new_layout = mddev->layout; mddev 3256 drivers/md/raid1.c mddev->new_level = mddev->level; mddev 3260 drivers/md/raid1.c if (!mddev_is_clustered(mddev)) mddev 3261 drivers/md/raid1.c md_allow_write(mddev); mddev 3263 drivers/md/raid1.c raid_disks = mddev->raid_disks + mddev->delta_disks; mddev 3277 drivers/md/raid1.c newpoolinfo->mddev = mddev; mddev 3304 drivers/md/raid1.c sysfs_unlink_rdev(mddev, rdev); mddev 3306 drivers/md/raid1.c sysfs_unlink_rdev(mddev, rdev); mddev 3307 drivers/md/raid1.c if (sysfs_link_rdev(mddev, rdev)) mddev 3309 drivers/md/raid1.c mdname(mddev), rdev->raid_disk); mddev 3320 drivers/md/raid1.c mddev->degraded += (raid_disks - conf->raid_disks); mddev 3322 drivers/md/raid1.c conf->raid_disks = mddev->raid_disks = raid_disks; mddev 3323 drivers/md/raid1.c mddev->delta_disks = 0; mddev 3327 drivers/md/raid1.c set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); mddev 3328 drivers/md/raid1.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 3329 drivers/md/raid1.c md_wakeup_thread(mddev->thread); mddev 3335 drivers/md/raid1.c static void raid1_quiesce(struct mddev *mddev, int quiesce) mddev 3337 drivers/md/raid1.c struct r1conf *conf = mddev->private; mddev 3345 drivers/md/raid1.c static void *raid1_takeover(struct mddev *mddev) mddev 3350 drivers/md/raid1.c if (mddev->level == 5 && mddev->raid_disks == 2) { mddev 3352 drivers/md/raid1.c mddev->new_level = 1; mddev 3353 drivers/md/raid1.c mddev->new_layout = 0; mddev 3354 drivers/md/raid1.c mddev->new_chunk_sectors = 0; mddev 3355 drivers/md/raid1.c conf = setup_conf(mddev); mddev 3359 drivers/md/raid1.c mddev_clear_unsupported_flags(mddev, mddev 64 drivers/md/raid1.h struct mddev *mddev; mddev 69 drivers/md/raid1.h struct mddev *mddev; mddev 161 drivers/md/raid1.h struct mddev *mddev; mddev 71 drivers/md/raid10.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, mddev 73 drivers/md/raid10.c static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio); mddev 129 drivers/md/raid10.c if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || mddev 130 drivers/md/raid10.c test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) mddev 174 drivers/md/raid10.c &conf->mddev->recovery)) { mddev 255 drivers/md/raid10.c struct r10conf *conf = r10_bio->mddev->private; mddev 263 drivers/md/raid10.c struct r10conf *conf = r10_bio->mddev->private; mddev 273 drivers/md/raid10.c struct mddev *mddev = r10_bio->mddev; mddev 274 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 284 drivers/md/raid10.c md_wakeup_thread(mddev->thread); mddev 295 drivers/md/raid10.c struct r10conf *conf = r10_bio->mddev->private; mddev 315 drivers/md/raid10.c struct r10conf *conf = r10_bio->mddev->private; mddev 355 drivers/md/raid10.c struct r10conf *conf = r10_bio->mddev->private; mddev 387 drivers/md/raid10.c rdev_dec_pending(rdev, conf->mddev); mddev 394 drivers/md/raid10.c mdname(conf->mddev), mddev 405 drivers/md/raid10.c md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, mddev 409 drivers/md/raid10.c md_write_end(r10_bio->mddev); mddev 432 drivers/md/raid10.c struct r10conf *conf = r10_bio->mddev->private; mddev 457 drivers/md/raid10.c md_error(rdev->mddev, rdev); mddev 462 
drivers/md/raid10.c &rdev->mddev->recovery); mddev 467 drivers/md/raid10.c md_error(rdev->mddev, rdev); mddev 531 drivers/md/raid10.c rdev_dec_pending(rdev, conf->mddev); mddev 630 drivers/md/raid10.c conf->mddev->reshape_backwards)) { mddev 735 drivers/md/raid10.c if ((conf->mddev->recovery_cp < MaxSector mddev 737 drivers/md/raid10.c (mddev_is_clustered(conf->mddev) && mddev 738 drivers/md/raid10.c md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, mddev 851 drivers/md/raid10.c static int raid10_congested(struct mddev *mddev, int bits) mddev 853 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 905 drivers/md/raid10.c md_bitmap_unplug(conf->mddev->bitmap); mddev 993 drivers/md/raid10.c raid10_log(conf->mddev, "wait barrier"); mddev 1056 drivers/md/raid10.c if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) || mddev 1073 drivers/md/raid10.c struct mddev *mddev = plug->cb.data; mddev 1074 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1083 drivers/md/raid10.c md_wakeup_thread(mddev->thread); mddev 1090 drivers/md/raid10.c md_bitmap_unplug(mddev->bitmap); mddev 1117 drivers/md/raid10.c static void regular_request_wait(struct mddev *mddev, struct r10conf *conf, mddev 1121 drivers/md/raid10.c while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 1124 drivers/md/raid10.c raid10_log(conf->mddev, "wait reshape"); mddev 1134 drivers/md/raid10.c static void raid10_read_request(struct mddev *mddev, struct bio *bio, mddev 1137 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1176 drivers/md/raid10.c regular_request_wait(mddev, conf, bio, r10_bio->sectors); mddev 1181 drivers/md/raid10.c mdname(mddev), b, mddev 1189 drivers/md/raid10.c mdname(mddev), mddev 1205 drivers/md/raid10.c read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set); mddev 1220 drivers/md/raid10.c if (mddev->gendisk) mddev 1222 drivers/md/raid10.c read_bio, disk_devt(mddev->gendisk), mddev 1228 drivers/md/raid10.c static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, mddev 1238 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1253 drivers/md/raid10.c mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set); mddev 1270 drivers/md/raid10.c if (conf->mddev->gendisk) mddev 1272 drivers/md/raid10.c mbio, disk_devt(conf->mddev->gendisk), mddev 1279 drivers/md/raid10.c cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug)); mddev 1292 drivers/md/raid10.c md_wakeup_thread(mddev->thread); mddev 1296 drivers/md/raid10.c static void raid10_write_request(struct mddev *mddev, struct bio *bio, mddev 1299 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1305 drivers/md/raid10.c if ((mddev_is_clustered(mddev) && mddev 1306 drivers/md/raid10.c md_cluster_ops->area_resyncing(mddev, WRITE, mddev 1313 drivers/md/raid10.c if (!md_cluster_ops->area_resyncing(mddev, WRITE, mddev 1322 drivers/md/raid10.c regular_request_wait(mddev, conf, bio, sectors); mddev 1323 drivers/md/raid10.c if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 1324 drivers/md/raid10.c (mddev->reshape_backwards mddev 1330 drivers/md/raid10.c mddev->reshape_position = conf->reshape_progress; mddev 1331 drivers/md/raid10.c set_mask_bits(&mddev->sb_flags, 0, mddev 1333 drivers/md/raid10.c md_wakeup_thread(mddev->thread); mddev 1334 drivers/md/raid10.c raid10_log(conf->mddev, "wait reshape metadata"); mddev 1335 drivers/md/raid10.c wait_event(mddev->sb_wait, mddev 1336 drivers/md/raid10.c !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 
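The raid10_write_request() entries just above (lines 1323-1336) capture a small metadata handshake: a write landing inside the active reshape window first publishes mddev->reshape_position, marks the superblock dirty, wakes the md thread, and sleeps on mddev->sb_wait until MD_SB_CHANGE_PENDING clears, so the on-disk metadata always covers the region being written. A rough user-space sketch of that ordering, with pthread primitives standing in for set_mask_bits()/md_wakeup_thread()/wait_event() (an analogy, not the kernel's wait machinery):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sb_wait = PTHREAD_COND_INITIALIZER;
static bool sb_change_pending;
static long reshape_position, reshape_safe;

/* Stand-in for the md thread persisting the superblock. */
static void *md_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!sb_change_pending)
		pthread_cond_wait(&sb_wait, &lock);
	/* ... superblock written here, recording reshape_position ... */
	sb_change_pending = false;
	pthread_cond_broadcast(&sb_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, md_thread, NULL);

	pthread_mutex_lock(&lock);
	reshape_position = 4096;		/* conf->reshape_progress */
	sb_change_pending = true;		/* MD_SB_CHANGE_PENDING */
	pthread_cond_broadcast(&sb_wait);	/* md_wakeup_thread() */
	while (sb_change_pending)		/* wait_event(mddev->sb_wait, ...) */
		pthread_cond_wait(&sb_wait, &lock);
	reshape_safe = reshape_position;	/* conf->reshape_safe = ... */
	pthread_mutex_unlock(&lock);

	pthread_join(thr, NULL);
	printf("write may proceed; reshape_safe=%ld\n", reshape_safe);
	return 0;
}

The same publish-then-wait pattern reappears in reshape_request() below (lines 4517-4532), where the reshape thread itself checkpoints reshape_position before crossing into unrecorded territory.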
mddev 1338 drivers/md/raid10.c conf->reshape_safe = mddev->reshape_position; mddev 1342 drivers/md/raid10.c md_wakeup_thread(mddev->thread); mddev 1343 drivers/md/raid10.c raid10_log(mddev, "wait queued"); mddev 1453 drivers/md/raid10.c rdev_dec_pending(conf->mirrors[d].rdev, mddev); mddev 1464 drivers/md/raid10.c rdev_dec_pending(rdev, mddev); mddev 1468 drivers/md/raid10.c raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk); mddev 1469 drivers/md/raid10.c md_wait_for_blocked_rdev(blocked_rdev, mddev); mddev 1489 drivers/md/raid10.c md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); mddev 1493 drivers/md/raid10.c raid10_write_one_disk(mddev, r10_bio, bio, false, i); mddev 1495 drivers/md/raid10.c raid10_write_one_disk(mddev, r10_bio, bio, true, i); mddev 1500 drivers/md/raid10.c static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) mddev 1502 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1510 drivers/md/raid10.c r10_bio->mddev = mddev; mddev 1516 drivers/md/raid10.c raid10_read_request(mddev, bio, r10_bio); mddev 1518 drivers/md/raid10.c raid10_write_request(mddev, bio, r10_bio); mddev 1521 drivers/md/raid10.c static bool raid10_make_request(struct mddev *mddev, struct bio *bio) mddev 1523 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1529 drivers/md/raid10.c && md_flush_request(mddev, bio)) mddev 1532 drivers/md/raid10.c if (!md_write_start(mddev, bio)) mddev 1547 drivers/md/raid10.c __make_request(mddev, bio, sectors); mddev 1554 drivers/md/raid10.c static void raid10_status(struct seq_file *seq, struct mddev *mddev) mddev 1556 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1560 drivers/md/raid10.c seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2); mddev 1572 drivers/md/raid10.c conf->geo.raid_disks - mddev->degraded); mddev 1634 drivers/md/raid10.c static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) mddev 1637 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1647 drivers/md/raid10.c if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev mddev 1656 drivers/md/raid10.c mddev->degraded++; mddev 1660 drivers/md/raid10.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 1663 drivers/md/raid10.c set_mask_bits(&mddev->sb_flags, 0, mddev 1668 drivers/md/raid10.c mdname(mddev), bdevname(rdev->bdev, b), mddev 1669 drivers/md/raid10.c mdname(mddev), conf->geo.raid_disks - mddev->degraded); mddev 1682 drivers/md/raid10.c pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded, mddev 1706 drivers/md/raid10.c static int raid10_spare_active(struct mddev *mddev) mddev 1709 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1747 drivers/md/raid10.c mddev->degraded -= count; mddev 1754 drivers/md/raid10.c static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) mddev 1756 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1762 drivers/md/raid10.c if (mddev->recovery_cp < MaxSector) mddev 1770 drivers/md/raid10.c if (md_integrity_add_rdev(rdev, mddev)) mddev 1784 drivers/md/raid10.c if (p->recovery_disabled == mddev->recovery_disabled) mddev 1794 drivers/md/raid10.c if (mddev->gendisk) mddev 1795 drivers/md/raid10.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 1802 drivers/md/raid10.c if (mddev->gendisk) mddev 1803 drivers/md/raid10.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 1807 drivers/md/raid10.c p->recovery_disabled = mddev->recovery_disabled - 1; mddev 
1815 drivers/md/raid10.c if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) mddev 1816 drivers/md/raid10.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); mddev 1822 drivers/md/raid10.c static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) mddev 1824 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1847 drivers/md/raid10.c mddev->recovery_disabled != p->recovery_disabled && mddev 1875 drivers/md/raid10.c err = md_integrity_register(mddev); mddev 1885 drivers/md/raid10.c struct r10conf *conf = r10_bio->mddev->private; mddev 1899 drivers/md/raid10.c rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev); mddev 1912 drivers/md/raid10.c struct r10conf *conf = r10_bio->mddev->private; mddev 1928 drivers/md/raid10.c struct mddev *mddev = r10_bio->mddev; mddev 1939 drivers/md/raid10.c md_done_sync(mddev, s, 1); mddev 1956 drivers/md/raid10.c struct mddev *mddev = r10_bio->mddev; mddev 1957 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 1973 drivers/md/raid10.c md_error(mddev, rdev); mddev 1978 drivers/md/raid10.c &rdev->mddev->recovery); mddev 1987 drivers/md/raid10.c rdev_dec_pending(rdev, mddev); mddev 2008 drivers/md/raid10.c static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) mddev 2010 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 2067 drivers/md/raid10.c atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); mddev 2068 drivers/md/raid10.c if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) mddev 2073 drivers/md/raid10.c md_error(rdev->mddev, rdev); mddev 2126 drivers/md/raid10.c md_done_sync(mddev, r10_bio->sectors, 1); mddev 2150 drivers/md/raid10.c struct mddev *mddev = r10_bio->mddev; mddev 2151 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 2189 drivers/md/raid10.c &rdev->mddev->recovery); mddev 2207 drivers/md/raid10.c mdname(mddev)); mddev 2210 drivers/md/raid10.c = mddev->recovery_disabled; mddev 2212 drivers/md/raid10.c &mddev->recovery); mddev 2224 drivers/md/raid10.c static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) mddev 2226 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 2268 drivers/md/raid10.c static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev) mddev 2314 drivers/md/raid10.c &rdev->mddev->recovery); mddev 2318 drivers/md/raid10.c md_error(rdev->mddev, rdev); mddev 2330 drivers/md/raid10.c static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) mddev 2335 drivers/md/raid10.c int max_read_errors = atomic_read(&mddev->max_corr_read_errors); mddev 2348 drivers/md/raid10.c check_decay_read_errors(mddev, rdev); mddev 2355 drivers/md/raid10.c mdname(mddev), b, mddev 2358 drivers/md/raid10.c mdname(mddev), b); mddev 2359 drivers/md/raid10.c md_error(mddev, rdev); mddev 2393 drivers/md/raid10.c rdev_dec_pending(rdev, mddev); mddev 2417 drivers/md/raid10.c md_error(mddev, rdev); mddev 2449 drivers/md/raid10.c mdname(mddev), s, mddev 2456 drivers/md/raid10.c mdname(mddev), mddev 2459 drivers/md/raid10.c rdev_dec_pending(rdev, mddev); mddev 2486 drivers/md/raid10.c mdname(mddev), s, mddev 2492 drivers/md/raid10.c mdname(mddev), mddev 2497 drivers/md/raid10.c mdname(mddev), s, mddev 2505 drivers/md/raid10.c rdev_dec_pending(rdev, mddev); mddev 2518 drivers/md/raid10.c struct mddev *mddev = r10_bio->mddev; mddev 2519 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 2554 drivers/md/raid10.c wbio = bio_clone_fast(bio, GFP_NOIO, 
&mddev->bio_set); mddev 2576 drivers/md/raid10.c static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) mddev 2580 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 2595 drivers/md/raid10.c if (mddev->ro) mddev 2599 drivers/md/raid10.c fix_read_error(conf, mddev, r10_bio); mddev 2602 drivers/md/raid10.c md_error(mddev, rdev); mddev 2604 drivers/md/raid10.c rdev_dec_pending(rdev, mddev); mddev 2607 drivers/md/raid10.c raid10_read_request(mddev, r10_bio->master_bio, r10_bio); mddev 2639 drivers/md/raid10.c md_error(conf->mddev, rdev); mddev 2656 drivers/md/raid10.c md_error(conf->mddev, rdev); mddev 2671 drivers/md/raid10.c rdev_dec_pending(rdev, conf->mddev); mddev 2675 drivers/md/raid10.c md_error(conf->mddev, rdev); mddev 2679 drivers/md/raid10.c rdev_dec_pending(rdev, conf->mddev); mddev 2688 drivers/md/raid10.c rdev_dec_pending(rdev, conf->mddev); mddev 2701 drivers/md/raid10.c md_wakeup_thread(conf->mddev->thread); mddev 2713 drivers/md/raid10.c struct mddev *mddev = thread->mddev; mddev 2716 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 2720 drivers/md/raid10.c md_check_recovery(mddev); mddev 2723 drivers/md/raid10.c !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { mddev 2726 drivers/md/raid10.c if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { mddev 2737 drivers/md/raid10.c if (mddev->degraded) mddev 2762 drivers/md/raid10.c mddev = r10_bio->mddev; mddev 2763 drivers/md/raid10.c conf = mddev->private; mddev 2768 drivers/md/raid10.c reshape_request_write(mddev, r10_bio); mddev 2770 drivers/md/raid10.c sync_request_write(mddev, r10_bio); mddev 2772 drivers/md/raid10.c recovery_request_write(mddev, r10_bio); mddev 2774 drivers/md/raid10.c handle_read_error(mddev, r10_bio); mddev 2779 drivers/md/raid10.c if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING)) mddev 2780 drivers/md/raid10.c md_check_recovery(mddev); mddev 2811 drivers/md/raid10.c if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) || mddev 2812 drivers/md/raid10.c test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery)) mddev 2858 drivers/md/raid10.c window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors; mddev 2901 drivers/md/raid10.c static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, mddev 2904 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 2924 drivers/md/raid10.c if (mddev->bitmap == NULL && mddev 2925 drivers/md/raid10.c mddev->recovery_cp == MaxSector && mddev 2926 drivers/md/raid10.c mddev->reshape_position == MaxSector && mddev 2927 drivers/md/raid10.c !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && mddev 2928 drivers/md/raid10.c !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && mddev 2929 drivers/md/raid10.c !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev 2932 drivers/md/raid10.c return mddev->dev_sectors - sector_nr; mddev 2936 drivers/md/raid10.c max_sector = mddev->dev_sectors; mddev 2937 drivers/md/raid10.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || mddev 2938 drivers/md/raid10.c test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) mddev 2939 drivers/md/raid10.c max_sector = mddev->resync_max_sectors; mddev 2953 drivers/md/raid10.c if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { mddev 2959 drivers/md/raid10.c if (mddev->curr_resync < max_sector) { /* aborted */ mddev 2960 drivers/md/raid10.c if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) mddev 2961 drivers/md/raid10.c md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync, mddev 2965 drivers/md/raid10.c 
raid10_find_virt(conf, mddev->curr_resync, i); mddev 2966 drivers/md/raid10.c md_bitmap_end_sync(mddev->bitmap, sect, mddev 2971 drivers/md/raid10.c if ((!mddev->bitmap || conf->fullsync) mddev 2973 drivers/md/raid10.c && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { mddev 2988 drivers/md/raid10.c md_bitmap_close_sync(mddev->bitmap); mddev 2994 drivers/md/raid10.c if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) mddev 2995 drivers/md/raid10.c return reshape_request(mddev, sector_nr, skipped); mddev 3005 drivers/md/raid10.c if (max_sector > mddev->resync_max) mddev 3006 drivers/md/raid10.c max_sector = mddev->resync_max; /* Don't do IO beyond here */ mddev 3038 drivers/md/raid10.c if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { mddev 3075 drivers/md/raid10.c if (sect >= mddev->resync_max_sectors) { mddev 3088 drivers/md/raid10.c must_sync = md_bitmap_start_sync(mddev->bitmap, sect, mddev 3115 drivers/md/raid10.c r10_bio->mddev = mddev; mddev 3134 drivers/md/raid10.c must_sync = md_bitmap_start_sync(mddev->bitmap, sect, mddev 3250 drivers/md/raid10.c &mddev->recovery)) mddev 3252 drivers/md/raid10.c mdname(mddev)); mddev 3254 drivers/md/raid10.c = mddev->recovery_disabled; mddev 3260 drivers/md/raid10.c rdev_dec_pending(mrdev, mddev); mddev 3262 drivers/md/raid10.c rdev_dec_pending(mreplace, mddev); mddev 3265 drivers/md/raid10.c rdev_dec_pending(mrdev, mddev); mddev 3267 drivers/md/raid10.c rdev_dec_pending(mreplace, mddev); mddev 3306 drivers/md/raid10.c md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, mddev 3307 drivers/md/raid10.c mddev_is_clustered(mddev) && mddev 3310 drivers/md/raid10.c if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, mddev 3311 drivers/md/raid10.c &sync_blocks, mddev->degraded) && mddev 3313 drivers/md/raid10.c &mddev->recovery)) { mddev 3323 drivers/md/raid10.c r10_bio->mddev = mddev; mddev 3405 drivers/md/raid10.c mddev); mddev 3410 drivers/md/raid10.c mddev); mddev 3442 drivers/md/raid10.c if (mddev_is_clustered(mddev) && mddev 3443 drivers/md/raid10.c test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { mddev 3446 drivers/md/raid10.c conf->cluster_sync_low = mddev->curr_resync_completed; mddev 3449 drivers/md/raid10.c md_cluster_ops->resync_info_update(mddev, mddev 3453 drivers/md/raid10.c } else if (mddev_is_clustered(mddev)) { mddev 3473 drivers/md/raid10.c mddev->curr_resync_completed, i); mddev 3482 drivers/md/raid10.c md_cluster_ops->resync_info_update(mddev, mddev 3507 drivers/md/raid10.c md_done_sync(mddev, sectors_skipped, 1); mddev 3525 drivers/md/raid10.c raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) mddev 3528 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 3575 drivers/md/raid10.c static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) mddev 3581 drivers/md/raid10.c layout = mddev->layout; mddev 3582 drivers/md/raid10.c chunk = mddev->chunk_sectors; mddev 3583 drivers/md/raid10.c disks = mddev->raid_disks - mddev->delta_disks; mddev 3586 drivers/md/raid10.c layout = mddev->new_layout; mddev 3587 drivers/md/raid10.c chunk = mddev->new_chunk_sectors; mddev 3588 drivers/md/raid10.c disks = mddev->raid_disks; mddev 3593 drivers/md/raid10.c layout = mddev->new_layout; mddev 3594 drivers/md/raid10.c chunk = mddev->new_chunk_sectors; mddev 3595 drivers/md/raid10.c disks = mddev->raid_disks + mddev->delta_disks; mddev 3631 drivers/md/raid10.c static struct r10conf *setup_conf(struct mddev *mddev) mddev 3638 drivers/md/raid10.c copies = setup_geo(&geo, mddev, geo_new); mddev 3642 
drivers/md/raid10.c mdname(mddev), PAGE_SIZE); mddev 3646 drivers/md/raid10.c if (copies < 2 || copies > mddev->raid_disks) { mddev 3648 drivers/md/raid10.c mdname(mddev), mddev->new_layout); mddev 3658 drivers/md/raid10.c conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks), mddev 3679 drivers/md/raid10.c calc_sectors(conf, mddev->dev_sectors); mddev 3680 drivers/md/raid10.c if (mddev->reshape_position == MaxSector) { mddev 3684 drivers/md/raid10.c if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) { mddev 3688 drivers/md/raid10.c conf->reshape_progress = mddev->reshape_position; mddev 3705 drivers/md/raid10.c conf->thread = md_register_thread(raid10d, mddev, "raid10"); mddev 3709 drivers/md/raid10.c conf->mddev = mddev; mddev 3723 drivers/md/raid10.c static int raid10_run(struct mddev *mddev) mddev 3734 drivers/md/raid10.c if (mddev_init_writes_pending(mddev) < 0) mddev 3737 drivers/md/raid10.c if (mddev->private == NULL) { mddev 3738 drivers/md/raid10.c conf = setup_conf(mddev); mddev 3741 drivers/md/raid10.c mddev->private = conf; mddev 3743 drivers/md/raid10.c conf = mddev->private; mddev 3747 drivers/md/raid10.c if (mddev_is_clustered(conf->mddev)) { mddev 3750 drivers/md/raid10.c fc = (mddev->layout >> 8) & 255; mddev 3751 drivers/md/raid10.c fo = mddev->layout & (1<<16); mddev 3759 drivers/md/raid10.c mddev->thread = conf->thread; mddev 3762 drivers/md/raid10.c chunk_size = mddev->chunk_sectors << 9; mddev 3763 drivers/md/raid10.c if (mddev->queue) { mddev 3764 drivers/md/raid10.c blk_queue_max_discard_sectors(mddev->queue, mddev 3765 drivers/md/raid10.c mddev->chunk_sectors); mddev 3766 drivers/md/raid10.c blk_queue_max_write_same_sectors(mddev->queue, 0); mddev 3767 drivers/md/raid10.c blk_queue_max_write_zeroes_sectors(mddev->queue, 0); mddev 3768 drivers/md/raid10.c blk_queue_io_min(mddev->queue, chunk_size); mddev 3770 drivers/md/raid10.c blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); mddev 3772 drivers/md/raid10.c blk_queue_io_opt(mddev->queue, chunk_size * mddev 3776 drivers/md/raid10.c rdev_for_each(rdev, mddev) { mddev 3797 drivers/md/raid10.c if (!mddev->reshape_backwards) mddev 3804 drivers/md/raid10.c if (mddev->gendisk) mddev 3805 drivers/md/raid10.c disk_stack_limits(mddev->gendisk, rdev->bdev, mddev 3815 drivers/md/raid10.c if (mddev->queue) { mddev 3818 drivers/md/raid10.c mddev->queue); mddev 3821 drivers/md/raid10.c mddev->queue); mddev 3826 drivers/md/raid10.c mdname(mddev)); mddev 3840 drivers/md/raid10.c mddev->degraded = 0; mddev 3858 drivers/md/raid10.c mddev->degraded++; mddev 3870 drivers/md/raid10.c disk->recovery_disabled = mddev->recovery_disabled - 1; mddev 3873 drivers/md/raid10.c if (mddev->recovery_cp != MaxSector) mddev 3875 drivers/md/raid10.c mdname(mddev)); mddev 3877 drivers/md/raid10.c mdname(mddev), conf->geo.raid_disks - mddev->degraded, mddev 3882 drivers/md/raid10.c mddev->dev_sectors = conf->dev_sectors; mddev 3883 drivers/md/raid10.c size = raid10_size(mddev, 0, 0); mddev 3884 drivers/md/raid10.c md_set_array_sectors(mddev, size); mddev 3885 drivers/md/raid10.c mddev->resync_max_sectors = size; mddev 3886 drivers/md/raid10.c set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); mddev 3888 drivers/md/raid10.c if (mddev->queue) { mddev 3890 drivers/md/raid10.c ((mddev->chunk_sectors << 9) / PAGE_SIZE); mddev 3897 drivers/md/raid10.c if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe) mddev 3898 drivers/md/raid10.c mddev->queue->backing_dev_info->ra_pages = 2 * stripe; mddev 3901 
drivers/md/raid10.c if (md_integrity_register(mddev)) mddev 3919 drivers/md/raid10.c clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); mddev 3920 drivers/md/raid10.c clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); mddev 3921 drivers/md/raid10.c set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); mddev 3922 drivers/md/raid10.c set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev 3923 drivers/md/raid10.c mddev->sync_thread = md_register_thread(md_do_sync, mddev, mddev 3925 drivers/md/raid10.c if (!mddev->sync_thread) mddev 3932 drivers/md/raid10.c md_unregister_thread(&mddev->thread); mddev 3937 drivers/md/raid10.c mddev->private = NULL; mddev 3942 drivers/md/raid10.c static void raid10_free(struct mddev *mddev, void *priv) mddev 3955 drivers/md/raid10.c static void raid10_quiesce(struct mddev *mddev, int quiesce) mddev 3957 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 3965 drivers/md/raid10.c static int raid10_resize(struct mddev *mddev, sector_t sectors) mddev 3979 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 3982 drivers/md/raid10.c if (mddev->reshape_position != MaxSector) mddev 3988 drivers/md/raid10.c oldsize = raid10_size(mddev, 0, 0); mddev 3989 drivers/md/raid10.c size = raid10_size(mddev, sectors, 0); mddev 3990 drivers/md/raid10.c if (mddev->external_size && mddev 3991 drivers/md/raid10.c mddev->array_sectors > size) mddev 3993 drivers/md/raid10.c if (mddev->bitmap) { mddev 3994 drivers/md/raid10.c int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0); mddev 3998 drivers/md/raid10.c md_set_array_sectors(mddev, size); mddev 3999 drivers/md/raid10.c if (sectors > mddev->dev_sectors && mddev 4000 drivers/md/raid10.c mddev->recovery_cp > oldsize) { mddev 4001 drivers/md/raid10.c mddev->recovery_cp = oldsize; mddev 4002 drivers/md/raid10.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 4005 drivers/md/raid10.c mddev->dev_sectors = conf->dev_sectors; mddev 4006 drivers/md/raid10.c mddev->resync_max_sectors = size; mddev 4010 drivers/md/raid10.c static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) mddev 4015 drivers/md/raid10.c if (mddev->degraded > 0) { mddev 4017 drivers/md/raid10.c mdname(mddev)); mddev 4023 drivers/md/raid10.c mddev->new_level = 10; mddev 4025 drivers/md/raid10.c mddev->new_layout = (1<<8) + 2; mddev 4026 drivers/md/raid10.c mddev->new_chunk_sectors = mddev->chunk_sectors; mddev 4027 drivers/md/raid10.c mddev->delta_disks = mddev->raid_disks; mddev 4028 drivers/md/raid10.c mddev->raid_disks *= 2; mddev 4030 drivers/md/raid10.c mddev->recovery_cp = MaxSector; mddev 4031 drivers/md/raid10.c mddev->dev_sectors = size; mddev 4033 drivers/md/raid10.c conf = setup_conf(mddev); mddev 4035 drivers/md/raid10.c rdev_for_each(rdev, mddev) mddev 4046 drivers/md/raid10.c static void *raid10_takeover(struct mddev *mddev) mddev 4053 drivers/md/raid10.c if (mddev->level == 0) { mddev 4055 drivers/md/raid10.c raid0_conf = mddev->private; mddev 4058 drivers/md/raid10.c mdname(mddev)); mddev 4061 drivers/md/raid10.c return raid10_takeover_raid0(mddev, mddev 4068 drivers/md/raid10.c static int raid10_check_reshape(struct mddev *mddev) mddev 4084 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 4090 drivers/md/raid10.c if (setup_geo(&geo, mddev, geo_start) != conf->copies) mddev 4097 drivers/md/raid10.c if (mddev->array_sectors & geo.chunk_mask) mddev 4106 drivers/md/raid10.c if (mddev->delta_disks > 0) { mddev 4109 drivers/md/raid10.c kcalloc(mddev->raid_disks + mddev->delta_disks, mddev 4175 
drivers/md/raid10.c static int raid10_start_reshape(struct mddev *mddev) mddev 4191 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 4196 drivers/md/raid10.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) mddev 4199 drivers/md/raid10.c if (setup_geo(&new, mddev, geo_start) != conf->copies) mddev 4207 drivers/md/raid10.c rdev_for_each(rdev, mddev) { mddev 4214 drivers/md/raid10.c if (!mddev->reshape_backwards) mddev 4227 drivers/md/raid10.c if (spares < mddev->delta_disks) mddev 4241 drivers/md/raid10.c setup_geo(&conf->geo, mddev, geo_start); mddev 4243 drivers/md/raid10.c if (mddev->reshape_backwards) { mddev 4244 drivers/md/raid10.c sector_t size = raid10_size(mddev, 0, 0); mddev 4245 drivers/md/raid10.c if (size < mddev->array_sectors) { mddev 4248 drivers/md/raid10.c mdname(mddev)); mddev 4251 drivers/md/raid10.c mddev->resync_max_sectors = size; mddev 4258 drivers/md/raid10.c if (mddev->delta_disks && mddev->bitmap) { mddev 4262 drivers/md/raid10.c oldsize = raid10_size(mddev, 0, 0); mddev 4263 drivers/md/raid10.c newsize = raid10_size(mddev, 0, conf->geo.raid_disks); mddev 4265 drivers/md/raid10.c if (!mddev_is_clustered(mddev)) { mddev 4266 drivers/md/raid10.c ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); mddev 4273 drivers/md/raid10.c rdev_for_each(rdev, mddev) { mddev 4288 drivers/md/raid10.c ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0); mddev 4292 drivers/md/raid10.c ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize); mddev 4294 drivers/md/raid10.c md_bitmap_resize(mddev->bitmap, oldsize, 0, 0); mddev 4299 drivers/md/raid10.c if (mddev->delta_disks > 0) { mddev 4300 drivers/md/raid10.c rdev_for_each(rdev, mddev) mddev 4303 drivers/md/raid10.c if (raid10_add_disk(mddev, rdev) == 0) { mddev 4310 drivers/md/raid10.c if (sysfs_link_rdev(mddev, rdev)) mddev 4324 drivers/md/raid10.c mddev->degraded = calc_degraded(conf); mddev 4326 drivers/md/raid10.c mddev->raid_disks = conf->geo.raid_disks; mddev 4327 drivers/md/raid10.c mddev->reshape_position = conf->reshape_progress; mddev 4328 drivers/md/raid10.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 4330 drivers/md/raid10.c clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); mddev 4331 drivers/md/raid10.c clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); mddev 4332 drivers/md/raid10.c clear_bit(MD_RECOVERY_DONE, &mddev->recovery); mddev 4333 drivers/md/raid10.c set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); mddev 4334 drivers/md/raid10.c set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); mddev 4336 drivers/md/raid10.c mddev->sync_thread = md_register_thread(md_do_sync, mddev, mddev 4338 drivers/md/raid10.c if (!mddev->sync_thread) { mddev 4343 drivers/md/raid10.c md_wakeup_thread(mddev->sync_thread); mddev 4344 drivers/md/raid10.c md_new_event(mddev); mddev 4348 drivers/md/raid10.c mddev->recovery = 0; mddev 4351 drivers/md/raid10.c mddev->raid_disks = conf->geo.raid_disks; mddev 4352 drivers/md/raid10.c rdev_for_each(rdev, mddev) mddev 4357 drivers/md/raid10.c mddev->reshape_position = MaxSector; mddev 4393 drivers/md/raid10.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, mddev 4433 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 4448 drivers/md/raid10.c if (mddev->reshape_backwards && mddev 4449 drivers/md/raid10.c conf->reshape_progress < raid10_size(mddev, 0, 0)) { mddev 4450 drivers/md/raid10.c sector_nr = (raid10_size(mddev, 0, 0) mddev 4452 drivers/md/raid10.c } else if (!mddev->reshape_backwards && mddev 4456 drivers/md/raid10.c 
mddev->curr_resync_completed = sector_nr; mddev 4457 drivers/md/raid10.c sysfs_notify(&mddev->kobj, NULL, "sync_completed"); mddev 4467 drivers/md/raid10.c if (mddev->reshape_backwards) { mddev 4517 drivers/md/raid10.c mddev->reshape_position = conf->reshape_progress; mddev 4518 drivers/md/raid10.c if (mddev->reshape_backwards) mddev 4519 drivers/md/raid10.c mddev->curr_resync_completed = raid10_size(mddev, 0, 0) mddev 4522 drivers/md/raid10.c mddev->curr_resync_completed = conf->reshape_progress; mddev 4524 drivers/md/raid10.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 4525 drivers/md/raid10.c md_wakeup_thread(mddev->thread); mddev 4526 drivers/md/raid10.c wait_event(mddev->sb_wait, mddev->sb_flags == 0 || mddev 4527 drivers/md/raid10.c test_bit(MD_RECOVERY_INTR, &mddev->recovery)); mddev 4528 drivers/md/raid10.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { mddev 4532 drivers/md/raid10.c conf->reshape_safe = mddev->reshape_position; mddev 4543 drivers/md/raid10.c r10_bio->mddev = mddev; mddev 4556 drivers/md/raid10.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 4560 drivers/md/raid10.c read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); mddev 4579 drivers/md/raid10.c if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) { mddev 4597 drivers/md/raid10.c md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low, mddev 4667 drivers/md/raid10.c if (mddev->reshape_backwards) mddev 4676 drivers/md/raid10.c static int handle_reshape_read_error(struct mddev *mddev, mddev 4678 drivers/md/raid10.c static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) mddev 4685 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 4689 drivers/md/raid10.c if (handle_reshape_read_error(mddev, r10_bio) < 0) { mddev 4691 drivers/md/raid10.c md_done_sync(mddev, r10_bio->sectors, 0); mddev 4727 drivers/md/raid10.c if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) mddev 4732 drivers/md/raid10.c md_finish_reshape(conf->mddev); mddev 4741 drivers/md/raid10.c if (conf->mddev->queue) { mddev 4743 drivers/md/raid10.c ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE); mddev 4745 drivers/md/raid10.c if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe) mddev 4746 drivers/md/raid10.c conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe; mddev 4751 drivers/md/raid10.c static void raid10_update_reshape_pos(struct mddev *mddev) mddev 4753 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 4756 drivers/md/raid10.c md_cluster_ops->resync_info_get(mddev, &lo, &hi); mddev 4757 drivers/md/raid10.c if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo)) mddev 4758 drivers/md/raid10.c || mddev->reshape_position == MaxSector) mddev 4759 drivers/md/raid10.c conf->reshape_progress = mddev->reshape_position; mddev 4764 drivers/md/raid10.c static int handle_reshape_read_error(struct mddev *mddev, mddev 4769 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 4777 drivers/md/raid10.c set_bit(MD_RECOVERY_INTR, &mddev->recovery); mddev 4813 drivers/md/raid10.c rdev_dec_pending(rdev, mddev); mddev 4828 drivers/md/raid10.c &mddev->recovery); mddev 4842 drivers/md/raid10.c struct mddev *mddev = r10_bio->mddev; mddev 4843 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 4859 drivers/md/raid10.c md_error(mddev, rdev); mddev 4862 drivers/md/raid10.c rdev_dec_pending(rdev, mddev); mddev 4870 drivers/md/raid10.c md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); mddev 4875 
drivers/md/raid10.c static void raid10_finish_reshape(struct mddev *mddev) mddev 4877 drivers/md/raid10.c struct r10conf *conf = mddev->private; mddev 4879 drivers/md/raid10.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev 4882 drivers/md/raid10.c if (mddev->delta_disks > 0) { mddev 4883 drivers/md/raid10.c if (mddev->recovery_cp > mddev->resync_max_sectors) { mddev 4884 drivers/md/raid10.c mddev->recovery_cp = mddev->resync_max_sectors; mddev 4885 drivers/md/raid10.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); mddev 4887 drivers/md/raid10.c mddev->resync_max_sectors = mddev->array_sectors; mddev 4892 drivers/md/raid10.c d < conf->geo.raid_disks - mddev->delta_disks; mddev 4903 drivers/md/raid10.c mddev->layout = mddev->new_layout; mddev 4904 drivers/md/raid10.c mddev->chunk_sectors = 1 << conf->geo.chunk_shift; mddev 4905 drivers/md/raid10.c mddev->reshape_position = MaxSector; mddev 4906 drivers/md/raid10.c mddev->delta_disks = 0; mddev 4907 drivers/md/raid10.c mddev->reshape_backwards = 0; mddev 29 drivers/md/raid10.h struct mddev *mddev; mddev 127 drivers/md/raid10.h struct mddev *mddev; mddev 303 drivers/md/raid5-cache.c md_write_end(conf->mddev); mddev 318 drivers/md/raid5-cache.c md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, mddev 420 drivers/md/raid5-cache.c struct r5conf *conf = log->rdev->mddev->private; mddev 570 drivers/md/raid5-cache.c md_error(log->rdev->mddev, log->rdev); mddev 605 drivers/md/raid5-cache.c md_wakeup_thread(log->rdev->mddev->thread); mddev 686 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 687 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 693 drivers/md/raid5-cache.c mdname(mddev)); mddev 696 drivers/md/raid5-cache.c wait_event(mddev->sb_wait, mddev 698 drivers/md/raid5-cache.c (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && mddev 699 drivers/md/raid5-cache.c (locked = mddev_trylock(mddev)))); mddev 701 drivers/md/raid5-cache.c mddev_suspend(mddev); mddev 703 drivers/md/raid5-cache.c mddev_resume(mddev); mddev 704 drivers/md/raid5-cache.c mddev_unlock(mddev); mddev 866 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 867 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 1179 drivers/md/raid5-cache.c struct r5conf *conf = log->rdev->mddev->private; mddev 1227 drivers/md/raid5-cache.c struct r5conf *conf = log->rdev->mddev->private; mddev 1265 drivers/md/raid5-cache.c md_error(log->rdev->mddev, log->rdev); mddev 1319 drivers/md/raid5-cache.c struct mddev *mddev; mddev 1326 drivers/md/raid5-cache.c mddev = log->rdev->mddev; mddev 1338 drivers/md/raid5-cache.c set_mask_bits(&mddev->sb_flags, 0, mddev 1340 drivers/md/raid5-cache.c if (!mddev_trylock(mddev)) mddev 1342 drivers/md/raid5-cache.c md_update_sb(mddev, 1); mddev 1343 drivers/md/raid5-cache.c mddev_unlock(mddev); mddev 1496 drivers/md/raid5-cache.c md_wakeup_thread(conf->mddev->thread); mddev 1501 drivers/md/raid5-cache.c struct r5conf *conf = log->rdev->mddev->private; mddev 1524 drivers/md/raid5-cache.c md_wakeup_thread(log->rdev->mddev->thread); mddev 1553 drivers/md/raid5-cache.c struct mddev *mddev = thread->mddev; mddev 1554 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 1580 drivers/md/raid5-cache.c struct mddev *mddev; mddev 1584 drivers/md/raid5-cache.c mddev = log->rdev->mddev; mddev 1585 drivers/md/raid5-cache.c wake_up(&mddev->sb_wait); mddev 1602 drivers/md/raid5-cache.c ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); mddev 1818 drivers/md/raid5-cache.c 
struct mddev *mddev = log->rdev->mddev; mddev 1819 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 1840 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 1841 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 1908 drivers/md/raid5-cache.c rdev_dec_pending(rdev, rdev->mddev); mddev 1918 drivers/md/raid5-cache.c rdev_dec_pending(rrdev, rrdev->mddev); mddev 2008 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 2009 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 2082 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 2083 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 2160 drivers/md/raid5-cache.c mdname(mddev), mddev 2162 drivers/md/raid5-cache.c ret = raid5_set_cache_size(mddev, new_size); mddev 2165 drivers/md/raid5-cache.c mdname(mddev), mddev 2177 drivers/md/raid5-cache.c mdname(mddev)); mddev 2358 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 2365 drivers/md/raid5-cache.c mdname(mddev)); mddev 2430 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 2431 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 2455 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 2489 drivers/md/raid5-cache.c mdname(mddev)); mddev 2492 drivers/md/raid5-cache.c mdname(mddev), ctx->data_only_stripes, mddev 2501 drivers/md/raid5-cache.c mdname(mddev)); mddev 2524 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 2527 drivers/md/raid5-cache.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev 2530 drivers/md/raid5-cache.c static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) mddev 2535 drivers/md/raid5-cache.c ret = mddev_lock(mddev); mddev 2539 drivers/md/raid5-cache.c conf = mddev->private; mddev 2541 drivers/md/raid5-cache.c mddev_unlock(mddev); mddev 2561 drivers/md/raid5-cache.c mddev_unlock(mddev); mddev 2571 drivers/md/raid5-cache.c int r5c_journal_mode_set(struct mddev *mddev, int mode) mddev 2579 drivers/md/raid5-cache.c conf = mddev->private; mddev 2587 drivers/md/raid5-cache.c mddev_suspend(mddev); mddev 2589 drivers/md/raid5-cache.c mddev_resume(mddev); mddev 2592 drivers/md/raid5-cache.c mdname(mddev), mode, r5c_journal_mode_str[mode]); mddev 2597 drivers/md/raid5-cache.c static ssize_t r5c_journal_mode_store(struct mddev *mddev, mddev 2614 drivers/md/raid5-cache.c ret = mddev_lock(mddev); mddev 2617 drivers/md/raid5-cache.c ret = r5c_journal_mode_set(mddev, mode); mddev 2618 drivers/md/raid5-cache.c mddev_unlock(mddev); mddev 2780 drivers/md/raid5-cache.c md_wakeup_thread(conf->mddev->thread); mddev 2836 drivers/md/raid5-cache.c md_wakeup_thread(conf->mddev->thread); mddev 3042 drivers/md/raid5-cache.c struct mddev *mddev = log->rdev->mddev; mddev 3043 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 3050 drivers/md/raid5-cache.c void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev) mddev 3052 drivers/md/raid5-cache.c struct r5conf *conf = mddev->private; mddev 3072 drivers/md/raid5-cache.c mdname(conf->mddev), bdevname(rdev->bdev, b)); mddev 3088 drivers/md/raid5-cache.c mdname(conf->mddev), conf->raid_disks); mddev 3099 drivers/md/raid5-cache.c log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, mddev 3100 drivers/md/raid5-cache.c sizeof(rdev->mddev->uuid)); mddev 3131 drivers/md/raid5-cache.c log->rdev->mddev, "reclaim"); mddev 3153 drivers/md/raid5-cache.c set_bit(MD_HAS_JOURNAL, 
mddev 3177 drivers/md/raid5-cache.c wake_up(&conf->mddev->sb_wait);
mddev 32 drivers/md/raid5-log.h extern void r5c_update_on_rdev_error(struct mddev *mddev,
mddev 52 drivers/md/raid5-log.h return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
mddev 57 drivers/md/raid5-log.h return test_bit(MD_HAS_PPL, &conf->mddev->flags);
mddev 88 drivers/md/raid5-ppl.c struct mddev *mddev;
mddev 407 drivers/md/raid5-ppl.c md_error(ppl_conf->mddev, log->rdev);
mddev 557 drivers/md/raid5-ppl.c struct r5conf *conf = ppl_conf->mddev->private;
mddev 592 drivers/md/raid5-ppl.c struct r5conf *conf = ppl_conf->mddev->private;
mddev 601 drivers/md/raid5-ppl.c rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
mddev 603 drivers/md/raid5-ppl.c md_error(rdev->mddev, rdev);
mddev 611 drivers/md/raid5-ppl.c md_wakeup_thread(conf->mddev->thread);
mddev 619 drivers/md/raid5-ppl.c struct r5conf *conf = ppl_conf->mddev->private;
mddev 799 drivers/md/raid5-ppl.c struct mddev *mddev = ppl_conf->mddev;
mddev 800 drivers/md/raid5-ppl.c struct r5conf *conf = mddev->private;
mddev 909 drivers/md/raid5-ppl.c md_error(mddev, rdev);
mddev 934 drivers/md/raid5-ppl.c md_error(mddev, log->rdev);
mddev 957 drivers/md/raid5-ppl.c md_error(mddev, parity_rdev);
mddev 975 drivers/md/raid5-ppl.c struct mddev *mddev = rdev->mddev;
mddev 1007 drivers/md/raid5-ppl.c md_error(mddev, rdev);
mddev 1071 drivers/md/raid5-ppl.c md_error(rdev->mddev, rdev);
mddev 1083 drivers/md/raid5-ppl.c struct mddev *mddev = rdev->mddev;
mddev 1109 drivers/md/raid5-ppl.c md_error(mddev, rdev);
mddev 1133 drivers/md/raid5-ppl.c if (mddev->external) {
mddev 1179 drivers/md/raid5-ppl.c if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
mddev 1183 drivers/md/raid5-ppl.c if (!ret && !mddev->pers)
mddev 1218 drivers/md/raid5-ppl.c if (ppl_conf->mddev->external) {
mddev 1224 drivers/md/raid5-ppl.c mdname(ppl_conf->mddev));
mddev 1239 drivers/md/raid5-ppl.c clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
mddev 1240 drivers/md/raid5-ppl.c clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);
mddev 1281 drivers/md/raid5-ppl.c mdname(rdev->mddev), bdevname(rdev->bdev, b));
mddev 1292 drivers/md/raid5-ppl.c mdname(rdev->mddev), bdevname(rdev->bdev, b));
mddev 1296 drivers/md/raid5-ppl.c if (!rdev->mddev->external &&
mddev 1300 drivers/md/raid5-ppl.c mdname(rdev->mddev), bdevname(rdev->bdev, b));
mddev 1317 drivers/md/raid5-ppl.c &log->ppl_conf->mddev->flags);
mddev 1334 drivers/md/raid5-ppl.c struct mddev *mddev = conf->mddev;
mddev 1340 drivers/md/raid5-ppl.c mdname(conf->mddev));
mddev 1345 drivers/md/raid5-ppl.c if (mddev->level != 5) {
mddev 1347 drivers/md/raid5-ppl.c mdname(mddev), mddev->level);
mddev 1351 drivers/md/raid5-ppl.c if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
mddev 1353 drivers/md/raid5-ppl.c mdname(mddev));
mddev 1357 drivers/md/raid5-ppl.c if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
mddev 1359 drivers/md/raid5-ppl.c mdname(mddev));
mddev 1367 drivers/md/raid5-ppl.c mdname(mddev), max_disks);
mddev 1375 drivers/md/raid5-ppl.c ppl_conf->mddev = mddev;
mddev 1409 drivers/md/raid5-ppl.c if (!mddev->external) {
mddev 1410 drivers/md/raid5-ppl.c ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
mddev 1413 drivers/md/raid5-ppl.c ppl_conf->block_size = queue_logical_block_size(mddev->queue);
mddev 1441 drivers/md/raid5-ppl.c } else if (!mddev->pers && mddev->recovery_cp == 0 &&
mddev 1448 drivers/md/raid5-ppl.c mddev->recovery_cp = MaxSector;
mddev 1449 drivers/md/raid5-ppl.c set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
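The entries at raid5-ppl.c:1410 and raid5-cache.c:3099 show how both the PPL and the write-back journal derive a per-array magic from mddev->uuid with crc32c, so stale metadata from a different array is rejected on load. The recipe, extracted as a sketch (crc32c_le as used by the source files, from <linux/crc32c.h>):

    #include <linux/crc32c.h>

    /* Same seed and final inversion as the PPL signature above:
     * two arrays only produce the same value if their uuids collide. */
    static u32 example_array_signature(struct mddev *mddev)
    {
            return ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
    }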
mddev 1450 drivers/md/raid5-ppl.c } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
mddev 1457 drivers/md/raid5-ppl.c set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
mddev 1504 drivers/md/raid5-ppl.c ppl_write_hint_show(struct mddev *mddev, char *buf)
mddev 1510 drivers/md/raid5-ppl.c spin_lock(&mddev->lock);
mddev 1511 drivers/md/raid5-ppl.c conf = mddev->private;
mddev 1515 drivers/md/raid5-ppl.c spin_unlock(&mddev->lock);
mddev 1521 drivers/md/raid5-ppl.c ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
mddev 1533 drivers/md/raid5-ppl.c err = mddev_lock(mddev);
mddev 1537 drivers/md/raid5-ppl.c conf = mddev->private;
mddev 1550 drivers/md/raid5-ppl.c mddev_unlock(mddev);
mddev 190 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 262 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 268 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 349 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 395 drivers/md/raid5.c if (unlikely(!conf->mddev->thread) ||
mddev 400 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 616 drivers/md/raid5.c if (conf->mddev->reshape_position == MaxSector)
mddev 617 drivers/md/raid5.c return conf->mddev->degraded > conf->max_degraded;
mddev 832 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 1067 drivers/md/raid5.c if (!conf->mddev->external &&
mddev 1068 drivers/md/raid5.c conf->mddev->sb_flags) {
mddev 1073 drivers/md/raid5.c md_check_recovery(conf->mddev);
mddev 1081 drivers/md/raid5.c md_wait_for_blocked_rdev(rdev, conf->mddev);
mddev 1084 drivers/md/raid5.c rdev_dec_pending(rdev, conf->mddev);
mddev 1147 drivers/md/raid5.c if (conf->mddev->gendisk)
mddev 1149 drivers/md/raid5.c bi, disk_devt(conf->mddev->gendisk),
mddev 1197 drivers/md/raid5.c if (conf->mddev->gendisk)
mddev 1199 drivers/md/raid5.c rbi, disk_devt(conf->mddev->gendisk),
mddev 2194 drivers/md/raid5.c if (conf->mddev->gendisk)
mddev 2196 drivers/md/raid5.c "raid%d-%s", conf->level, mdname(conf->mddev));
mddev 2199 drivers/md/raid5.c "raid%d-%p", conf->level, conf->mddev);
mddev 2262 drivers/md/raid5.c mddev_suspend(conf->mddev);
mddev 2277 drivers/md/raid5.c mddev_resume(conf->mddev);
mddev 2318 drivers/md/raid5.c md_allow_write(conf->mddev);
mddev 2505 drivers/md/raid5.c mdname(conf->mddev), STRIPE_SECTORS,
mddev 2534 drivers/md/raid5.c mdname(conf->mddev),
mddev 2537 drivers/md/raid5.c else if (conf->mddev->degraded >= conf->max_degraded) {
mddev 2541 drivers/md/raid5.c mdname(conf->mddev),
mddev 2549 drivers/md/raid5.c mdname(conf->mddev),
mddev 2556 drivers/md/raid5.c mdname(conf->mddev),
mddev 2560 drivers/md/raid5.c mdname(conf->mddev), bdn);
mddev 2582 drivers/md/raid5.c md_error(conf->mddev, rdev);
mddev 2585 drivers/md/raid5.c rdev_dec_pending(rdev, conf->mddev);
mddev 2631 drivers/md/raid5.c md_error(conf->mddev, rdev);
mddev 2643 drivers/md/raid5.c &rdev->mddev->recovery);
mddev 2656 drivers/md/raid5.c rdev_dec_pending(rdev, conf->mddev);
mddev 2671 drivers/md/raid5.c static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
mddev 2674 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 2681 drivers/md/raid5.c mddev->degraded == conf->max_degraded) {
mddev 2686 drivers/md/raid5.c conf->recovery_disabled = mddev->recovery_disabled;
mddev 2693 drivers/md/raid5.c mddev->degraded = raid5_calc_degraded(conf);
mddev 2695 drivers/md/raid5.c set_bit(MD_RECOVERY_INTR, &mddev->recovery);
mddev 2698 drivers/md/raid5.c set_mask_bits(&mddev->sb_flags, 0,
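raid5_error() (raid5.c:2671-2706 above) handles a member-device failure: mark the rdev Faulty under the device lock, recompute the degraded count, and queue a superblock update. A condensed sketch of that flow; it assumes it lives in raid5.c (raid5_calc_degraded is static there), and omits the messages and blocked-rdev handling:

    /* Sketch only: condensed device-failure handling. */
    static void example_error(struct mddev *mddev, struct md_rdev *rdev)
    {
            struct r5conf *conf = mddev->private;
            unsigned long flags;

            spin_lock_irqsave(&conf->device_lock, flags);
            set_bit(Faulty, &rdev->flags);
            mddev->degraded = raid5_calc_degraded(conf);
            spin_unlock_irqrestore(&conf->device_lock, flags);
            set_bit(MD_RECOVERY_INTR, &mddev->recovery);

            /* publish both "devices changed" and "write pending" at once */
            set_mask_bits(&mddev->sb_flags, 0,
                          BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
    }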
mddev 2702 drivers/md/raid5.c mdname(mddev),
mddev 2704 drivers/md/raid5.c mdname(mddev),
mddev 2705 drivers/md/raid5.c conf->raid_disks - mddev->degraded);
mddev 2706 drivers/md/raid5.c r5c_update_on_rdev_error(mddev, rdev);
mddev 3029 drivers/md/raid5.c mdname(conf->mddev));
mddev 3273 drivers/md/raid5.c md_write_inc(conf->mddev, bi);
mddev 3294 drivers/md/raid5.c if (conf->mddev->bitmap && firstwrite) {
mddev 3309 drivers/md/raid5.c md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
mddev 3373 drivers/md/raid5.c md_error(conf->mddev, rdev);
mddev 3374 drivers/md/raid5.c rdev_dec_pending(rdev, conf->mddev);
mddev 3395 drivers/md/raid5.c md_write_end(conf->mddev);
mddev 3400 drivers/md/raid5.c md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
mddev 3416 drivers/md/raid5.c md_write_end(conf->mddev);
mddev 3446 drivers/md/raid5.c md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
mddev 3458 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 3481 drivers/md/raid5.c if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
mddev 3505 drivers/md/raid5.c conf->mddev->recovery_disabled;
mddev 3507 drivers/md/raid5.c md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
mddev 3521 drivers/md/raid5.c || rdev->mddev->recovery_cp <= sh->sector))
mddev 3603 drivers/md/raid5.c sh->sector < sh->raid_conf->mddev->recovery_cp)
mddev 3783 drivers/md/raid5.c md_write_end(conf->mddev);
mddev 3787 drivers/md/raid5.c md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
mddev 3844 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 3871 drivers/md/raid5.c sector_t recovery_cp = conf->mddev->recovery_cp;
mddev 3922 drivers/md/raid5.c if (conf->mddev->queue)
mddev 3923 drivers/md/raid5.c blk_add_trace_msg(conf->mddev->queue,
mddev 4006 drivers/md/raid5.c if (rcw && conf->mddev->queue)
mddev 4007 drivers/md/raid5.c blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
mddev 4095 drivers/md/raid5.c atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
mddev 4096 drivers/md/raid5.c if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
mddev 4100 drivers/md/raid5.c "%llu-%llu\n", mdname(conf->mddev),
mddev 4222 drivers/md/raid5.c mdname(conf->mddev),
mddev 4260 drivers/md/raid5.c atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
mddev 4261 drivers/md/raid5.c if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
mddev 4265 drivers/md/raid5.c "%llu-%llu\n", mdname(conf->mddev),
mddev 4560 drivers/md/raid5.c sh->sector >= conf->mddev->recovery_cp ||
mddev 4561 drivers/md/raid5.c test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
mddev 4723 drivers/md/raid5.c test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
mddev 4735 drivers/md/raid5.c rdev_dec_pending(s.blocked_rdev, conf->mddev);
mddev 4923 drivers/md/raid5.c md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
mddev 4932 drivers/md/raid5.c if (s.failed <= conf->max_degraded && !conf->mddev->ro)
mddev 4991 drivers/md/raid5.c md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
mddev 5001 drivers/md/raid5.c if (conf->mddev->external)
mddev 5003 drivers/md/raid5.c conf->mddev);
mddev 5010 drivers/md/raid5.c conf->mddev);
mddev 5022 drivers/md/raid5.c md_error(conf->mddev, rdev);
mddev 5023 drivers/md/raid5.c rdev_dec_pending(rdev, conf->mddev);
mddev 5029 drivers/md/raid5.c rdev_dec_pending(rdev, conf->mddev);
mddev 5038 drivers/md/raid5.c rdev_dec_pending(rdev, conf->mddev);
mddev 5055 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 5095 drivers/md/raid5.c static int raid5_congested(struct mddev *mddev, int bits)
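The md_bitmap_startwrite/md_bitmap_endwrite pairs at raid5.c:3309 and 3400 above bracket every stripe write so the write-intent bitmap stays dirty until the last member-device write completes. A sketch of that bracketing, assuming STRIPE_SECTORS from drivers/md/raid5.h:

    /* Sketch only: write-intent bitmap bracketing around one stripe. */
    static void example_bitmap_bracket(struct r5conf *conf, sector_t sector)
    {
            if (conf->mddev->bitmap)
                    md_bitmap_startwrite(conf->mddev->bitmap, sector,
                                         STRIPE_SECTORS, 0 /* not behind */);

            /* ... submit the stripe's member-device writes here ... */

            if (conf->mddev->bitmap)
                    md_bitmap_endwrite(conf->mddev->bitmap, sector,
                                       STRIPE_SECTORS, 1 /* success */, 0);
    }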
mddev 5097 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 5117 drivers/md/raid5.c static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
mddev 5119 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 5145 drivers/md/raid5.c md_wakeup_thread(conf->mddev->thread);
mddev 5178 drivers/md/raid5.c struct mddev *mddev;
mddev 5187 drivers/md/raid5.c mddev = rdev->mddev;
mddev 5188 drivers/md/raid5.c conf = mddev->private;
mddev 5190 drivers/md/raid5.c rdev_dec_pending(rdev, conf->mddev);
mddev 5204 drivers/md/raid5.c static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
mddev 5206 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 5212 drivers/md/raid5.c if (!in_chunk_boundary(mddev, raid_bio)) {
mddev 5219 drivers/md/raid5.c align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
mddev 5267 drivers/md/raid5.c rdev_dec_pending(rdev, mddev);
mddev 5281 drivers/md/raid5.c if (mddev->gendisk)
mddev 5283 drivers/md/raid5.c align_bi, disk_devt(mddev->gendisk),
mddev 5294 drivers/md/raid5.c static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
mddev 5298 drivers/md/raid5.c unsigned chunk_sects = mddev->chunk_sectors;
mddev 5302 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 5309 drivers/md/raid5.c if (!raid5_read_one_chunk(mddev, raid_bio))
mddev 5428 drivers/md/raid5.c struct mddev *mddev = cb->cb.data;
mddev 5429 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 5457 drivers/md/raid5.c if (mddev->queue)
mddev 5458 drivers/md/raid5.c trace_block_unplug(mddev->queue, cnt, !from_schedule);
mddev 5462 drivers/md/raid5.c static void release_stripe_plug(struct mddev *mddev,
mddev 5466 drivers/md/raid5.c raid5_unplug, mddev,
mddev 5490 drivers/md/raid5.c static void make_discard_request(struct mddev *mddev, struct bio *bi)
mddev 5492 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 5497 drivers/md/raid5.c if (mddev->reshape_position != MaxSector)
mddev 5551 drivers/md/raid5.c md_write_inc(mddev, bi);
mddev 5555 drivers/md/raid5.c if (conf->mddev->bitmap) {
mddev 5559 drivers/md/raid5.c md_bitmap_startwrite(mddev->bitmap,
mddev 5571 drivers/md/raid5.c release_stripe_plug(mddev, sh);
mddev 5577 drivers/md/raid5.c static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
mddev 5579 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 5595 drivers/md/raid5.c if (md_flush_request(mddev, bi))
mddev 5606 drivers/md/raid5.c if (!md_write_start(mddev, bi))
mddev 5613 drivers/md/raid5.c if (rw == READ && mddev->degraded == 0 &&
mddev 5614 drivers/md/raid5.c mddev->reshape_position == MaxSector) {
mddev 5615 drivers/md/raid5.c bi = chunk_aligned_read(mddev, bi);
mddev 5621 drivers/md/raid5.c make_discard_request(mddev, bi);
mddev 5622 drivers/md/raid5.c md_write_end(mddev);
mddev 5652 drivers/md/raid5.c if (mddev->reshape_backwards
mddev 5657 drivers/md/raid5.c if (mddev->reshape_backwards
mddev 5690 drivers/md/raid5.c if (mddev->reshape_backwards
mddev 5717 drivers/md/raid5.c md_wakeup_thread(mddev->thread);
mddev 5736 drivers/md/raid5.c release_stripe_plug(mddev, sh);
mddev 5746 drivers/md/raid5.c md_write_end(mddev);
mddev 5751 drivers/md/raid5.c static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
mddev 5753 drivers/md/raid5.c static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
mddev 5764 drivers/md/raid5.c struct r5conf *conf = mddev->private;
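in_chunk_boundary() (raid5.c:5117 above) gates the chunk_aligned_read fast path: a read bio may bypass the stripe cache only when it does not straddle a chunk, so it can be redirected to a single member device. A simplified sketch of the alignment test (the in-tree version also considers the pre-reshape chunk size):

    /* Sketch only: does this bio fit entirely inside one chunk?
     * chunk_sectors is a power of two, so "& (chunk - 1)" is the
     * offset of the bio within its chunk. */
    static bool example_in_chunk_boundary(struct mddev *mddev, struct bio *bio)
    {
            unsigned int chunk_sectors = mddev->chunk_sectors;
            sector_t sector = bio->bi_iter.bi_sector;
            unsigned int nr_sectors = bio_sectors(bio);

            return chunk_sectors >=
                   (sector & (chunk_sectors - 1)) + nr_sectors;
    }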
mddev 5781 drivers/md/raid5.c if (mddev->reshape_backwards &&
mddev 5782 drivers/md/raid5.c conf->reshape_progress < raid5_size(mddev, 0, 0)) {
mddev 5783 drivers/md/raid5.c sector_nr = raid5_size(mddev, 0, 0)
mddev 5785 drivers/md/raid5.c } else if (mddev->reshape_backwards &&
mddev 5789 drivers/md/raid5.c } else if (!mddev->reshape_backwards &&
mddev 5794 drivers/md/raid5.c mddev->curr_resync_completed = sector_nr;
mddev 5795 drivers/md/raid5.c sysfs_notify(&mddev->kobj, NULL, "sync_completed");
mddev 5821 drivers/md/raid5.c if (mddev->reshape_backwards) {
mddev 5839 drivers/md/raid5.c if (mddev->reshape_backwards) {
mddev 5842 drivers/md/raid5.c BUG_ON((mddev->dev_sectors &
mddev 5877 drivers/md/raid5.c if ((mddev->reshape_backwards
mddev 5884 drivers/md/raid5.c || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
mddev 5887 drivers/md/raid5.c mddev->reshape_position = conf->reshape_progress;
mddev 5888 drivers/md/raid5.c mddev->curr_resync_completed = sector_nr;
mddev 5889 drivers/md/raid5.c if (!mddev->reshape_backwards)
mddev 5891 drivers/md/raid5.c rdev_for_each(rdev, mddev)
mddev 5899 drivers/md/raid5.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 5900 drivers/md/raid5.c md_wakeup_thread(mddev->thread);
mddev 5901 drivers/md/raid5.c wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
mddev 5902 drivers/md/raid5.c test_bit(MD_RECOVERY_INTR, &mddev->recovery));
mddev 5903 drivers/md/raid5.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev 5906 drivers/md/raid5.c conf->reshape_safe = mddev->reshape_position;
mddev 5909 drivers/md/raid5.c sysfs_notify(&mddev->kobj, NULL, "sync_completed");
mddev 5930 drivers/md/raid5.c if (s < raid5_size(mddev, 0, 0)) {
mddev 5945 drivers/md/raid5.c if (mddev->reshape_backwards)
mddev 5962 drivers/md/raid5.c if (last_sector >= mddev->dev_sectors)
mddev 5963 drivers/md/raid5.c last_sector = mddev->dev_sectors - 1;
mddev 5985 drivers/md/raid5.c if (mddev->curr_resync_completed > mddev->resync_max ||
mddev 5986 drivers/md/raid5.c (sector_nr - mddev->curr_resync_completed) * 2
mddev 5987 drivers/md/raid5.c >= mddev->resync_max - mddev->curr_resync_completed) {
mddev 5991 drivers/md/raid5.c || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
mddev 5994 drivers/md/raid5.c mddev->reshape_position = conf->reshape_progress;
mddev 5995 drivers/md/raid5.c mddev->curr_resync_completed = sector_nr;
mddev 5996 drivers/md/raid5.c if (!mddev->reshape_backwards)
mddev 5998 drivers/md/raid5.c rdev_for_each(rdev, mddev)
mddev 6005 drivers/md/raid5.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 6006 drivers/md/raid5.c md_wakeup_thread(mddev->thread);
mddev 6007 drivers/md/raid5.c wait_event(mddev->sb_wait,
mddev 6008 drivers/md/raid5.c !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
mddev 6009 drivers/md/raid5.c || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
mddev 6010 drivers/md/raid5.c if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev 6013 drivers/md/raid5.c conf->reshape_safe = mddev->reshape_position;
mddev 6016 drivers/md/raid5.c sysfs_notify(&mddev->kobj, NULL, "sync_completed");
mddev 6022 drivers/md/raid5.c static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
mddev 6025 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 6027 drivers/md/raid5.c sector_t max_sector = mddev->dev_sectors;
mddev 6035 drivers/md/raid5.c if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
mddev 6040 drivers/md/raid5.c if (mddev->curr_resync < max_sector) /* aborted */
mddev 6041 drivers/md/raid5.c md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
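reshape_request() (raid5.c:5994-6016 above) checkpoints its progress by publishing reshape_position, asking the main md thread to write the superblock, and waiting until that write lands or the reshape is interrupted; only then is the new region safe to expose. A condensed sketch built from exactly the calls in those entries:

    /* Sketch only: reshape checkpoint as visible in the listing. */
    static void example_reshape_checkpoint(struct mddev *mddev,
                                           struct r5conf *conf,
                                           sector_t sector_nr)
    {
            mddev->reshape_position = conf->reshape_progress;
            mddev->curr_resync_completed = sector_nr;
            set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
            md_wakeup_thread(mddev->thread);        /* triggers md_update_sb() */
            wait_event(mddev->sb_wait,
                       !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) ||
                       test_bit(MD_RECOVERY_INTR, &mddev->recovery));
            if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                    return;                         /* caller aborts the pass */
            conf->reshape_safe = mddev->reshape_position;
            sysfs_notify(&mddev->kobj, NULL, "sync_completed");
    }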
mddev 6045 drivers/md/raid5.c md_bitmap_close_sync(mddev->bitmap);
mddev 6053 drivers/md/raid5.c if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
mddev 6054 drivers/md/raid5.c return reshape_request(mddev, sector_nr, skipped);
mddev 6066 drivers/md/raid5.c if (mddev->degraded >= conf->max_degraded &&
mddev 6067 drivers/md/raid5.c test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
mddev 6068 drivers/md/raid5.c sector_t rv = mddev->dev_sectors - sector_nr;
mddev 6072 drivers/md/raid5.c if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
mddev 6074 drivers/md/raid5.c !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
mddev 6082 drivers/md/raid5.c md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
mddev 6105 drivers/md/raid5.c md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
mddev 6234 drivers/md/raid5.c struct mddev *mddev = conf->mddev;
mddev 6255 drivers/md/raid5.c wait_event_lock_irq(mddev->sb_wait,
mddev 6256 drivers/md/raid5.c !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
mddev 6282 drivers/md/raid5.c struct mddev *mddev = thread->mddev;
mddev 6283 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 6289 drivers/md/raid5.c md_check_recovery(mddev);
mddev 6308 drivers/md/raid5.c md_bitmap_unplug(mddev->bitmap);
mddev 6331 drivers/md/raid5.c if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
mddev 6333 drivers/md/raid5.c md_check_recovery(mddev);
mddev 6361 drivers/md/raid5.c raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
mddev 6365 drivers/md/raid5.c spin_lock(&mddev->lock);
mddev 6366 drivers/md/raid5.c conf = mddev->private;
mddev 6369 drivers/md/raid5.c spin_unlock(&mddev->lock);
mddev 6374 drivers/md/raid5.c raid5_set_cache_size(struct mddev *mddev, int size)
mddev 6377 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 6389 drivers/md/raid5.c md_allow_write(mddev);
mddev 6405 drivers/md/raid5.c raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
mddev 6415 drivers/md/raid5.c err = mddev_lock(mddev);
mddev 6418 drivers/md/raid5.c conf = mddev->private;
mddev 6422 drivers/md/raid5.c err = raid5_set_cache_size(mddev, new);
mddev 6423 drivers/md/raid5.c mddev_unlock(mddev);
mddev 6434 drivers/md/raid5.c raid5_show_rmw_level(struct mddev *mddev, char *page)
mddev 6436 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 6444 drivers/md/raid5.c raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
mddev 6446 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 6477 drivers/md/raid5.c raid5_show_preread_threshold(struct mddev *mddev, char *page)
mddev 6481 drivers/md/raid5.c spin_lock(&mddev->lock);
mddev 6482 drivers/md/raid5.c conf = mddev->private;
mddev 6485 drivers/md/raid5.c spin_unlock(&mddev->lock);
mddev 6490 drivers/md/raid5.c raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
mddev 6501 drivers/md/raid5.c err = mddev_lock(mddev);
mddev 6504 drivers/md/raid5.c conf = mddev->private;
mddev 6511 drivers/md/raid5.c mddev_unlock(mddev);
mddev 6522 drivers/md/raid5.c raid5_show_skip_copy(struct mddev *mddev, char *page)
mddev 6526 drivers/md/raid5.c spin_lock(&mddev->lock);
mddev 6527 drivers/md/raid5.c conf = mddev->private;
mddev 6530 drivers/md/raid5.c spin_unlock(&mddev->lock);
mddev 6535 drivers/md/raid5.c raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
mddev 6547 drivers/md/raid5.c err = mddev_lock(mddev);
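Note the asymmetry in the sysfs handlers listed above (raid5.c:6361-6547): show paths take only the mddev->lock spinlock, which is enough to keep mddev->private stable while formatting one value, whereas store paths take the full reconfig mutex via mddev_lock(). A sketch of the lighter show side:

    /* Sketch only: spinlock-protected sysfs read of one conf field. */
    static ssize_t example_show_skip_copy(struct mddev *mddev, char *page)
    {
            struct r5conf *conf;
            int ret = 0;

            spin_lock(&mddev->lock);
            conf = mddev->private;  /* NULL once the array is stopped */
            if (conf)
                    ret = sprintf(page, "%d\n", conf->skip_copy);
            spin_unlock(&mddev->lock);
            return ret;
    }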
mddev 6550 drivers/md/raid5.c conf = mddev->private;
mddev 6554 drivers/md/raid5.c mddev_suspend(mddev);
mddev 6557 drivers/md/raid5.c mddev->queue->backing_dev_info->capabilities |=
mddev 6560 drivers/md/raid5.c mddev->queue->backing_dev_info->capabilities &=
mddev 6562 drivers/md/raid5.c mddev_resume(mddev);
mddev 6564 drivers/md/raid5.c mddev_unlock(mddev);
mddev 6574 drivers/md/raid5.c stripe_cache_active_show(struct mddev *mddev, char *page)
mddev 6576 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 6587 drivers/md/raid5.c raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
mddev 6591 drivers/md/raid5.c spin_lock(&mddev->lock);
mddev 6592 drivers/md/raid5.c conf = mddev->private;
mddev 6595 drivers/md/raid5.c spin_unlock(&mddev->lock);
mddev 6604 drivers/md/raid5.c raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
mddev 6620 drivers/md/raid5.c err = mddev_lock(mddev);
mddev 6623 drivers/md/raid5.c conf = mddev->private;
mddev 6627 drivers/md/raid5.c mddev_suspend(mddev);
mddev 6647 drivers/md/raid5.c mddev_resume(mddev);
mddev 6649 drivers/md/raid5.c mddev_unlock(mddev);
mddev 6732 drivers/md/raid5.c raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
mddev 6734 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 6737 drivers/md/raid5.c sectors = mddev->dev_sectors;
mddev 6877 drivers/md/raid5.c static struct r5conf *setup_conf(struct mddev *mddev)
mddev 6889 drivers/md/raid5.c if (mddev->new_level != 5
mddev 6890 drivers/md/raid5.c && mddev->new_level != 4
mddev 6891 drivers/md/raid5.c && mddev->new_level != 6) {
mddev 6893 drivers/md/raid5.c mdname(mddev), mddev->new_level);
mddev 6896 drivers/md/raid5.c if ((mddev->new_level == 5
mddev 6897 drivers/md/raid5.c && !algorithm_valid_raid5(mddev->new_layout)) ||
mddev 6898 drivers/md/raid5.c (mddev->new_level == 6
mddev 6899 drivers/md/raid5.c && !algorithm_valid_raid6(mddev->new_layout))) {
mddev 6901 drivers/md/raid5.c mdname(mddev), mddev->new_layout);
mddev 6904 drivers/md/raid5.c if (mddev->new_level == 6 && mddev->raid_disks < 4) {
mddev 6906 drivers/md/raid5.c mdname(mddev), mddev->raid_disks);
mddev 6910 drivers/md/raid5.c if (!mddev->new_chunk_sectors ||
mddev 6911 drivers/md/raid5.c (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
mddev 6912 drivers/md/raid5.c !is_power_of_2(mddev->new_chunk_sectors)) {
mddev 6914 drivers/md/raid5.c mdname(mddev), mddev->new_chunk_sectors << 9);
mddev 6955 drivers/md/raid5.c rdev_for_each(rdev, mddev) {
mddev 6965 drivers/md/raid5.c conf->recovery_disabled = mddev->recovery_disabled - 1;
mddev 6967 drivers/md/raid5.c conf->raid_disks = mddev->raid_disks;
mddev 6968 drivers/md/raid5.c if (mddev->reshape_position == MaxSector)
mddev 6969 drivers/md/raid5.c conf->previous_raid_disks = mddev->raid_disks;
mddev 6971 drivers/md/raid5.c conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
mddev 6989 drivers/md/raid5.c conf->mddev = mddev;
mddev 7016 drivers/md/raid5.c conf->level = mddev->new_level;
mddev 7017 drivers/md/raid5.c conf->chunk_sectors = mddev->new_chunk_sectors;
mddev 7021 drivers/md/raid5.c pr_debug("raid456: run(%s) called.\n", mdname(mddev));
mddev 7023 drivers/md/raid5.c rdev_for_each(rdev, mddev) {
mddev 7043 drivers/md/raid5.c mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
mddev 7049 drivers/md/raid5.c conf->level = mddev->new_level;
mddev 7060 drivers/md/raid5.c conf->algorithm = mddev->new_layout;
mddev 7061 drivers/md/raid5.c conf->reshape_progress = mddev->reshape_position;
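setup_conf() (raid5.c:6910-6914 above) rejects chunk sizes that are zero, not a whole multiple of PAGE_SIZE, or not a power of two; the stripe cache works in page-sized units per device, so anything else cannot be mapped. The same test extracted as a sketch (is_power_of_2 from <linux/log2.h>):

    #include <linux/log2.h>

    /* Sketch only: chunk_sectors is in 512-byte sectors, hence "<< 9". */
    static bool example_chunk_ok(int chunk_sectors)
    {
            return chunk_sectors &&
                   ((chunk_sectors << 9) % PAGE_SIZE) == 0 && /* whole pages */
                   is_power_of_2(chunk_sectors);
    }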
mddev 7063 drivers/md/raid5.c conf->prev_chunk_sectors = mddev->chunk_sectors;
mddev 7064 drivers/md/raid5.c conf->prev_algo = mddev->layout;
mddev 7071 drivers/md/raid5.c if (mddev->reshape_position != MaxSector) {
mddev 7073 drivers/md/raid5.c ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
mddev 7074 drivers/md/raid5.c ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
mddev 7078 drivers/md/raid5.c mdname(mddev), conf->min_nr_stripes);
mddev 7085 drivers/md/raid5.c mdname(mddev), memory);
mddev 7088 drivers/md/raid5.c pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
mddev 7101 drivers/md/raid5.c mdname(mddev));
mddev 7105 drivers/md/raid5.c sprintf(pers_name, "raid%d", mddev->new_level);
mddev 7106 drivers/md/raid5.c conf->thread = md_register_thread(raid5d, mddev, pers_name);
mddev 7109 drivers/md/raid5.c mdname(mddev));
mddev 7149 drivers/md/raid5.c static int raid5_run(struct mddev *mddev)
mddev 7161 drivers/md/raid5.c if (mddev_init_writes_pending(mddev) < 0)
mddev 7164 drivers/md/raid5.c if (mddev->recovery_cp != MaxSector)
mddev 7166 drivers/md/raid5.c mdname(mddev));
mddev 7168 drivers/md/raid5.c rdev_for_each(rdev, mddev) {
mddev 7181 drivers/md/raid5.c } else if (mddev->reshape_backwards &&
mddev 7184 drivers/md/raid5.c else if (!mddev->reshape_backwards &&
mddev 7189 drivers/md/raid5.c if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
mddev 7190 drivers/md/raid5.c (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
mddev 7192 drivers/md/raid5.c mdname(mddev));
mddev 7196 drivers/md/raid5.c if (mddev->reshape_position != MaxSector) {
mddev 7211 drivers/md/raid5.c int max_degraded = (mddev->level == 6 ? 2 : 1);
mddev 7217 drivers/md/raid5.c mdname(mddev));
mddev 7221 drivers/md/raid5.c if (mddev->new_level != mddev->level) {
mddev 7223 drivers/md/raid5.c mdname(mddev));
mddev 7226 drivers/md/raid5.c old_disks = mddev->raid_disks - mddev->delta_disks;
mddev 7234 drivers/md/raid5.c here_new = mddev->reshape_position;
mddev 7235 drivers/md/raid5.c chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
mddev 7236 drivers/md/raid5.c new_data_disks = mddev->raid_disks - max_degraded;
mddev 7239 drivers/md/raid5.c mdname(mddev));
mddev 7244 drivers/md/raid5.c here_old = mddev->reshape_position;
mddev 7248 drivers/md/raid5.c if (mddev->delta_disks == 0) {
mddev 7256 drivers/md/raid5.c if (abs(min_offset_diff) >= mddev->chunk_sectors &&
mddev 7257 drivers/md/raid5.c abs(min_offset_diff) >= mddev->new_chunk_sectors)
mddev 7259 drivers/md/raid5.c else if (mddev->ro == 0) {
mddev 7261 drivers/md/raid5.c mdname(mddev));
mddev 7264 drivers/md/raid5.c } else if (mddev->reshape_backwards
mddev 7271 drivers/md/raid5.c mdname(mddev));
mddev 7274 drivers/md/raid5.c pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
mddev 7277 drivers/md/raid5.c BUG_ON(mddev->level != mddev->new_level);
mddev 7278 drivers/md/raid5.c BUG_ON(mddev->layout != mddev->new_layout);
mddev 7279 drivers/md/raid5.c BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
mddev 7280 drivers/md/raid5.c BUG_ON(mddev->delta_disks != 0);
mddev 7283 drivers/md/raid5.c if (test_bit(MD_HAS_JOURNAL, &mddev->flags) &&
mddev 7284 drivers/md/raid5.c test_bit(MD_HAS_PPL, &mddev->flags)) {
mddev 7286 drivers/md/raid5.c mdname(mddev));
mddev 7287 drivers/md/raid5.c clear_bit(MD_HAS_PPL, &mddev->flags);
mddev 7288 drivers/md/raid5.c clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags);
mddev 7291 drivers/md/raid5.c if (mddev->private == NULL)
mddev 7292 drivers/md/raid5.c conf = setup_conf(mddev);
mddev 7294 drivers/md/raid5.c conf = mddev->private;
mddev 7299 drivers/md/raid5.c if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
mddev 7302 drivers/md/raid5.c mdname(mddev));
mddev 7303 drivers/md/raid5.c mddev->ro = 1;
mddev 7304 drivers/md/raid5.c set_disk_ro(mddev->gendisk, 1);
mddev 7305 drivers/md/raid5.c } else if (mddev->recovery_cp == MaxSector)
mddev 7306 drivers/md/raid5.c set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
mddev 7310 drivers/md/raid5.c mddev->thread = conf->thread;
mddev 7312 drivers/md/raid5.c mddev->private = conf;
mddev 7345 drivers/md/raid5.c if (mddev->major_version == 0 &&
mddev 7346 drivers/md/raid5.c mddev->minor_version > 90)
mddev 7368 drivers/md/raid5.c mddev->degraded = raid5_calc_degraded(conf);
mddev 7372 drivers/md/raid5.c mdname(mddev), mddev->degraded, conf->raid_disks);
mddev 7377 drivers/md/raid5.c mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
mddev 7378 drivers/md/raid5.c mddev->resync_max_sectors = mddev->dev_sectors;
mddev 7380 drivers/md/raid5.c if (mddev->degraded > dirty_parity_disks &&
mddev 7381 drivers/md/raid5.c mddev->recovery_cp != MaxSector) {
mddev 7382 drivers/md/raid5.c if (test_bit(MD_HAS_PPL, &mddev->flags))
mddev 7384 drivers/md/raid5.c mdname(mddev));
mddev 7385 drivers/md/raid5.c else if (mddev->ok_start_degraded)
mddev 7387 drivers/md/raid5.c mdname(mddev));
mddev 7390 drivers/md/raid5.c mdname(mddev));
mddev 7396 drivers/md/raid5.c mdname(mddev), conf->level,
mddev 7397 drivers/md/raid5.c mddev->raid_disks-mddev->degraded, mddev->raid_disks,
mddev 7398 drivers/md/raid5.c mddev->new_layout);
mddev 7405 drivers/md/raid5.c clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
mddev 7406 drivers/md/raid5.c clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
mddev 7407 drivers/md/raid5.c set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
mddev 7408 drivers/md/raid5.c set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev 7409 drivers/md/raid5.c mddev->sync_thread = md_register_thread(md_do_sync, mddev,
mddev 7411 drivers/md/raid5.c if (!mddev->sync_thread)
mddev 7416 drivers/md/raid5.c if (mddev->to_remove == &raid5_attrs_group)
mddev 7417 drivers/md/raid5.c mddev->to_remove = NULL;
mddev 7418 drivers/md/raid5.c else if (mddev->kobj.sd &&
mddev 7419 drivers/md/raid5.c sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
mddev 7421 drivers/md/raid5.c mdname(mddev));
mddev 7422 drivers/md/raid5.c md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
mddev 7424 drivers/md/raid5.c if (mddev->queue) {
mddev 7432 drivers/md/raid5.c ((mddev->chunk_sectors << 9) / PAGE_SIZE);
mddev 7433 drivers/md/raid5.c if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
mddev 7434 drivers/md/raid5.c mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
mddev 7436 drivers/md/raid5.c chunk_size = mddev->chunk_sectors << 9;
mddev 7437 drivers/md/raid5.c blk_queue_io_min(mddev->queue, chunk_size);
mddev 7438 drivers/md/raid5.c blk_queue_io_opt(mddev->queue, chunk_size *
mddev 7440 drivers/md/raid5.c mddev->queue->limits.raid_partial_stripes_expensive = 1;
mddev 7450 drivers/md/raid5.c mddev->queue->limits.discard_alignment = stripe;
mddev 7451 drivers/md/raid5.c mddev->queue->limits.discard_granularity = stripe;
mddev 7453 drivers/md/raid5.c blk_queue_max_write_same_sectors(mddev->queue, 0);
mddev 7454 drivers/md/raid5.c blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
mddev 7456 drivers/md/raid5.c rdev_for_each(rdev, mddev) {
mddev 7457 drivers/md/raid5.c disk_stack_limits(mddev->gendisk, rdev->bdev,
mddev 7459 drivers/md/raid5.c disk_stack_limits(mddev->gendisk, rdev->bdev,
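raid5_run() (raid5.c:7436-7438 above) exports the array geometry to the block layer so filesystems can align their allocations: minimum I/O is one chunk, optimal I/O a full data stripe. A sketch of that step, assuming conf->raid_disks and conf->max_degraded as declared in drivers/md/raid5.h:

    /* Sketch only: publish chunk/stripe geometry as queue limits. */
    static void example_set_queue_limits(struct mddev *mddev,
                                         struct r5conf *conf)
    {
            int chunk_size = mddev->chunk_sectors << 9; /* sectors -> bytes */

            blk_queue_io_min(mddev->queue, chunk_size);
            /* optimal I/O touches every data disk once: total minus parity */
            blk_queue_io_opt(mddev->queue,
                             chunk_size * (conf->raid_disks - conf->max_degraded));
    }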
mddev 7479 drivers/md/raid5.c mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
mddev 7480 drivers/md/raid5.c mddev->queue->limits.discard_granularity >= stripe)
mddev 7482 drivers/md/raid5.c mddev->queue);
mddev 7485 drivers/md/raid5.c mddev->queue);
mddev 7487 drivers/md/raid5.c blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
mddev 7495 drivers/md/raid5.c md_unregister_thread(&mddev->thread);
mddev 7498 drivers/md/raid5.c mddev->private = NULL;
mddev 7499 drivers/md/raid5.c pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
mddev 7503 drivers/md/raid5.c static void raid5_free(struct mddev *mddev, void *priv)
mddev 7508 drivers/md/raid5.c mddev->to_remove = &raid5_attrs_group;
mddev 7511 drivers/md/raid5.c static void raid5_status(struct seq_file *seq, struct mddev *mddev)
mddev 7513 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 7516 drivers/md/raid5.c seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
mddev 7517 drivers/md/raid5.c conf->chunk_sectors / 2, mddev->layout);
mddev 7518 drivers/md/raid5.c seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
mddev 7540 drivers/md/raid5.c conf->raid_disks - conf->mddev->degraded);
mddev 7552 drivers/md/raid5.c static int raid5_spare_active(struct mddev *mddev)
mddev 7555 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 7589 drivers/md/raid5.c mddev->degraded = raid5_calc_degraded(conf);
mddev 7595 drivers/md/raid5.c static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
mddev 7597 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 7639 drivers/md/raid5.c mddev->recovery_disabled != conf->recovery_disabled &&
mddev 7680 drivers/md/raid5.c static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
mddev 7682 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 7708 drivers/md/raid5.c if (mddev->recovery_disabled == conf->recovery_disabled)
mddev 7759 drivers/md/raid5.c static int raid5_resize(struct mddev *mddev, sector_t sectors)
mddev 7769 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 7774 drivers/md/raid5.c newsize = raid5_size(mddev, sectors, mddev->raid_disks);
mddev 7775 drivers/md/raid5.c if (mddev->external_size &&
mddev 7776 drivers/md/raid5.c mddev->array_sectors > newsize)
mddev 7778 drivers/md/raid5.c if (mddev->bitmap) {
mddev 7779 drivers/md/raid5.c int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
mddev 7783 drivers/md/raid5.c md_set_array_sectors(mddev, newsize);
mddev 7784 drivers/md/raid5.c if (sectors > mddev->dev_sectors &&
mddev 7785 drivers/md/raid5.c mddev->recovery_cp > mddev->dev_sectors) {
mddev 7786 drivers/md/raid5.c mddev->recovery_cp = mddev->dev_sectors;
mddev 7787 drivers/md/raid5.c set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
mddev 7789 drivers/md/raid5.c mddev->dev_sectors = sectors;
mddev 7790 drivers/md/raid5.c mddev->resync_max_sectors = sectors;
mddev 7794 drivers/md/raid5.c static int check_stripe_cache(struct mddev *mddev)
mddev 7804 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 7805 drivers/md/raid5.c if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
mddev 7807 drivers/md/raid5.c ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
mddev 7810 drivers/md/raid5.c mdname(mddev),
mddev 7811 drivers/md/raid5.c ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
mddev 7818 drivers/md/raid5.c static int check_reshape(struct mddev *mddev)
mddev 7820 drivers/md/raid5.c struct r5conf *conf = mddev->private;
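check_stripe_cache() (raid5.c:7805-7811 above) refuses to start a reshape unless the stripe cache can hold four full chunks under both the old and the new geometry, since the reshape window needs that many stripes in flight. The arithmetic as a sketch, with STRIPE_SIZE and conf->max_nr_stripes as in drivers/md/raid5.h:

    /* Sketch only: stripes needed = (chunk bytes / cache-entry bytes) * 4,
     * checked against the cache for both geometries of the reshape. */
    static bool example_stripe_cache_big_enough(struct mddev *mddev,
                                                struct r5conf *conf)
    {
            return ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 <=
                            conf->max_nr_stripes &&
                   ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 <=
                            conf->max_nr_stripes;
    }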
mddev 7824 drivers/md/raid5.c if (mddev->delta_disks == 0 &&
mddev 7825 drivers/md/raid5.c mddev->new_layout == mddev->layout &&
mddev 7826 drivers/md/raid5.c mddev->new_chunk_sectors == mddev->chunk_sectors)
mddev 7830 drivers/md/raid5.c if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
mddev 7837 drivers/md/raid5.c if (mddev->level == 6)
mddev 7839 drivers/md/raid5.c if (mddev->raid_disks + mddev->delta_disks < min)
mddev 7843 drivers/md/raid5.c if (!check_stripe_cache(mddev))
mddev 7846 drivers/md/raid5.c if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
mddev 7847 drivers/md/raid5.c mddev->delta_disks > 0)
mddev 7850 drivers/md/raid5.c + max(0, mddev->delta_disks),
mddev 7851 drivers/md/raid5.c max(mddev->new_chunk_sectors,
mddev 7852 drivers/md/raid5.c mddev->chunk_sectors)
mddev 7856 drivers/md/raid5.c if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size)
mddev 7859 drivers/md/raid5.c + mddev->delta_disks));
mddev 7862 drivers/md/raid5.c static int raid5_start_reshape(struct mddev *mddev)
mddev 7864 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 7869 drivers/md/raid5.c if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
mddev 7872 drivers/md/raid5.c if (!check_stripe_cache(mddev))
mddev 7878 drivers/md/raid5.c rdev_for_each(rdev, mddev) {
mddev 7884 drivers/md/raid5.c if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
mddev 7894 drivers/md/raid5.c if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
mddev 7895 drivers/md/raid5.c < mddev->array_sectors) {
mddev 7897 drivers/md/raid5.c mdname(mddev));
mddev 7905 drivers/md/raid5.c conf->raid_disks += mddev->delta_disks;
mddev 7907 drivers/md/raid5.c conf->chunk_sectors = mddev->new_chunk_sectors;
mddev 7909 drivers/md/raid5.c conf->algorithm = mddev->new_layout;
mddev 7915 drivers/md/raid5.c if (mddev->reshape_backwards)
mddev 7916 drivers/md/raid5.c conf->reshape_progress = raid5_size(mddev, 0, 0);
mddev 7927 drivers/md/raid5.c mddev_suspend(mddev);
mddev 7928 drivers/md/raid5.c mddev_resume(mddev);
mddev 7937 drivers/md/raid5.c if (mddev->delta_disks >= 0) {
mddev 7938 drivers/md/raid5.c rdev_for_each(rdev, mddev)
mddev 7941 drivers/md/raid5.c if (raid5_add_disk(mddev, rdev) == 0) {
mddev 7948 drivers/md/raid5.c if (sysfs_link_rdev(mddev, rdev))
mddev 7962 drivers/md/raid5.c mddev->degraded = raid5_calc_degraded(conf);
mddev 7965 drivers/md/raid5.c mddev->raid_disks = conf->raid_disks;
mddev 7966 drivers/md/raid5.c mddev->reshape_position = conf->reshape_progress;
mddev 7967 drivers/md/raid5.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 7969 drivers/md/raid5.c clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
mddev 7970 drivers/md/raid5.c clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
mddev 7971 drivers/md/raid5.c clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
mddev 7972 drivers/md/raid5.c set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
mddev 7973 drivers/md/raid5.c set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev 7974 drivers/md/raid5.c mddev->sync_thread = md_register_thread(md_do_sync, mddev,
mddev 7976 drivers/md/raid5.c if (!mddev->sync_thread) {
mddev 7977 drivers/md/raid5.c mddev->recovery = 0;
mddev 7980 drivers/md/raid5.c mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
mddev 7981 drivers/md/raid5.c mddev->new_chunk_sectors =
mddev 7983 drivers/md/raid5.c mddev->new_layout = conf->algorithm = conf->prev_algo;
mddev 7984 drivers/md/raid5.c rdev_for_each(rdev, mddev)
mddev 7989 drivers/md/raid5.c mddev->reshape_position = MaxSector;
mddev 7995 drivers/md/raid5.c md_wakeup_thread(mddev->sync_thread);
mddev 7996 drivers/md/raid5.c md_new_event(mddev);
mddev 8006 drivers/md/raid5.c if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
mddev 8011 drivers/md/raid5.c md_finish_reshape(conf->mddev);
mddev 8014 drivers/md/raid5.c conf->mddev->reshape_position = MaxSector;
mddev 8015 drivers/md/raid5.c rdev_for_each(rdev, conf->mddev)
mddev 8026 drivers/md/raid5.c if (conf->mddev->queue) {
mddev 8030 drivers/md/raid5.c if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
mddev 8031 drivers/md/raid5.c conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
mddev 8039 drivers/md/raid5.c static void raid5_finish_reshape(struct mddev *mddev)
mddev 8041 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 8043 drivers/md/raid5.c if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
mddev 8045 drivers/md/raid5.c if (mddev->delta_disks <= 0) {
mddev 8048 drivers/md/raid5.c mddev->degraded = raid5_calc_degraded(conf);
mddev 8051 drivers/md/raid5.c d < conf->raid_disks - mddev->delta_disks;
mddev 8061 drivers/md/raid5.c mddev->layout = conf->algorithm;
mddev 8062 drivers/md/raid5.c mddev->chunk_sectors = conf->chunk_sectors;
mddev 8063 drivers/md/raid5.c mddev->reshape_position = MaxSector;
mddev 8064 drivers/md/raid5.c mddev->delta_disks = 0;
mddev 8065 drivers/md/raid5.c mddev->reshape_backwards = 0;
mddev 8069 drivers/md/raid5.c static void raid5_quiesce(struct mddev *mddev, int quiesce)
mddev 8071 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 8101 drivers/md/raid5.c static void *raid45_takeover_raid0(struct mddev *mddev, int level)
mddev 8103 drivers/md/raid5.c struct r0conf *raid0_conf = mddev->private;
mddev 8109 drivers/md/raid5.c mdname(mddev));
mddev 8115 drivers/md/raid5.c mddev->dev_sectors = sectors;
mddev 8116 drivers/md/raid5.c mddev->new_level = level;
mddev 8117 drivers/md/raid5.c mddev->new_layout = ALGORITHM_PARITY_N;
mddev 8118 drivers/md/raid5.c mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev 8119 drivers/md/raid5.c mddev->raid_disks += 1;
mddev 8120 drivers/md/raid5.c mddev->delta_disks = 1;
mddev 8122 drivers/md/raid5.c mddev->recovery_cp = MaxSector;
mddev 8124 drivers/md/raid5.c return setup_conf(mddev);
mddev 8127 drivers/md/raid5.c static void *raid5_takeover_raid1(struct mddev *mddev)
mddev 8132 drivers/md/raid5.c if (mddev->raid_disks != 2 ||
mddev 8133 drivers/md/raid5.c mddev->degraded > 1)
mddev 8141 drivers/md/raid5.c while (chunksect && (mddev->array_sectors & (chunksect-1)))
mddev 8148 drivers/md/raid5.c mddev->new_level = 5;
mddev 8149 drivers/md/raid5.c mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
mddev 8150 drivers/md/raid5.c mddev->new_chunk_sectors = chunksect;
mddev 8152 drivers/md/raid5.c ret = setup_conf(mddev);
mddev 8154 drivers/md/raid5.c mddev_clear_unsupported_flags(mddev,
mddev 8159 drivers/md/raid5.c static void *raid5_takeover_raid6(struct mddev *mddev)
mddev 8163 drivers/md/raid5.c switch (mddev->layout) {
mddev 8185 drivers/md/raid5.c mddev->new_level = 5;
mddev 8186 drivers/md/raid5.c mddev->new_layout = new_layout;
mddev 8187 drivers/md/raid5.c mddev->delta_disks = -1;
mddev 8188 drivers/md/raid5.c mddev->raid_disks -= 1;
mddev 8189 drivers/md/raid5.c return setup_conf(mddev);
mddev 8192 drivers/md/raid5.c static int raid5_check_reshape(struct mddev *mddev)
mddev 8199 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 8200 drivers/md/raid5.c int new_chunk = mddev->new_chunk_sectors;
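raid5_takeover_raid1() (raid5.c:8141 above) must pick a chunk size for the new 2-disk raid5 that maps the raid1 data unchanged: it starts at 64KiB and halves until the chunk divides the array size exactly. A sketch of that selection, with the failure threshold expressed against PAGE_SIZE as the stripe-cache unit:

    /* Sketch only: largest power-of-two chunk (<= 64KiB) dividing the
     * array size; chunk sizes are in 512-byte sectors. */
    static int example_takeover_chunksect(struct mddev *mddev)
    {
            int chunksect = 64 * 2; /* 64KiB in sectors */

            while (chunksect && (mddev->array_sectors & (chunksect - 1)))
                    chunksect >>= 1;

            if (chunksect < (PAGE_SIZE >> 9))
                    return -EINVAL; /* array size too odd for a usable chunk */
            return chunksect;
    }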
mddev 8202 drivers/md/raid5.c if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
mddev 8209 drivers/md/raid5.c if (mddev->array_sectors & (new_chunk-1))
mddev 8216 drivers/md/raid5.c if (mddev->raid_disks == 2) {
mddev 8218 drivers/md/raid5.c if (mddev->new_layout >= 0) {
mddev 8219 drivers/md/raid5.c conf->algorithm = mddev->new_layout;
mddev 8220 drivers/md/raid5.c mddev->layout = mddev->new_layout;
mddev 8224 drivers/md/raid5.c mddev->chunk_sectors = new_chunk;
mddev 8226 drivers/md/raid5.c set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
mddev 8227 drivers/md/raid5.c md_wakeup_thread(mddev->thread);
mddev 8229 drivers/md/raid5.c return check_reshape(mddev);
mddev 8232 drivers/md/raid5.c static int raid6_check_reshape(struct mddev *mddev)
mddev 8234 drivers/md/raid5.c int new_chunk = mddev->new_chunk_sectors;
mddev 8236 drivers/md/raid5.c if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
mddev 8243 drivers/md/raid5.c if (mddev->array_sectors & (new_chunk-1))
mddev 8249 drivers/md/raid5.c return check_reshape(mddev);
mddev 8252 drivers/md/raid5.c static void *raid5_takeover(struct mddev *mddev)
mddev 8260 drivers/md/raid5.c if (mddev->level == 0)
mddev 8261 drivers/md/raid5.c return raid45_takeover_raid0(mddev, 5);
mddev 8262 drivers/md/raid5.c if (mddev->level == 1)
mddev 8263 drivers/md/raid5.c return raid5_takeover_raid1(mddev);
mddev 8264 drivers/md/raid5.c if (mddev->level == 4) {
mddev 8265 drivers/md/raid5.c mddev->new_layout = ALGORITHM_PARITY_N;
mddev 8266 drivers/md/raid5.c mddev->new_level = 5;
mddev 8267 drivers/md/raid5.c return setup_conf(mddev);
mddev 8269 drivers/md/raid5.c if (mddev->level == 6)
mddev 8270 drivers/md/raid5.c return raid5_takeover_raid6(mddev);
mddev 8275 drivers/md/raid5.c static void *raid4_takeover(struct mddev *mddev)
mddev 8281 drivers/md/raid5.c if (mddev->level == 0)
mddev 8282 drivers/md/raid5.c return raid45_takeover_raid0(mddev, 4);
mddev 8283 drivers/md/raid5.c if (mddev->level == 5 &&
mddev 8284 drivers/md/raid5.c mddev->layout == ALGORITHM_PARITY_N) {
mddev 8285 drivers/md/raid5.c mddev->new_layout = 0;
mddev 8286 drivers/md/raid5.c mddev->new_level = 4;
mddev 8287 drivers/md/raid5.c return setup_conf(mddev);
mddev 8294 drivers/md/raid5.c static void *raid6_takeover(struct mddev *mddev)
mddev 8302 drivers/md/raid5.c if (mddev->pers != &raid5_personality)
mddev 8304 drivers/md/raid5.c if (mddev->degraded > 1)
mddev 8306 drivers/md/raid5.c if (mddev->raid_disks > 253)
mddev 8308 drivers/md/raid5.c if (mddev->raid_disks < 3)
mddev 8311 drivers/md/raid5.c switch (mddev->layout) {
mddev 8333 drivers/md/raid5.c mddev->new_level = 6;
mddev 8334 drivers/md/raid5.c mddev->new_layout = new_layout;
mddev 8335 drivers/md/raid5.c mddev->delta_disks = 1;
mddev 8336 drivers/md/raid5.c mddev->raid_disks += 1;
mddev 8337 drivers/md/raid5.c return setup_conf(mddev);
mddev 8340 drivers/md/raid5.c static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
mddev 8345 drivers/md/raid5.c err = mddev_lock(mddev);
mddev 8348 drivers/md/raid5.c conf = mddev->private;
mddev 8350 drivers/md/raid5.c mddev_unlock(mddev);
mddev 8367 drivers/md/raid5.c mddev_suspend(mddev);
mddev 8369 drivers/md/raid5.c mddev_resume(mddev);
mddev 8371 drivers/md/raid5.c } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) &&
mddev 8376 drivers/md/raid5.c rdev_for_each(rdev, mddev)
mddev 8383 drivers/md/raid5.c mddev_suspend(mddev);
mddev 8384 drivers/md/raid5.c clear_bit(MD_HAS_JOURNAL, &mddev->flags);
mddev 8385 drivers/md/raid5.c mddev_resume(mddev);
mddev 8395 drivers/md/raid5.c md_update_sb(mddev, 1);
mddev 8397 drivers/md/raid5.c mddev_unlock(mddev);
mddev 8402 drivers/md/raid5.c static int raid5_start(struct mddev *mddev)
mddev 8404 drivers/md/raid5.c struct r5conf *conf = mddev->private;
mddev 570 drivers/md/raid5.h struct mddev *mddev;
mddev 756 drivers/md/raid5.h extern int raid5_set_cache_size(struct mddev *mddev, int size);
mddev 766 drivers/md/raid5.h extern int r5c_journal_mode_set(struct mddev *mddev, int journal_mode);