Lines Matching refs:sctx

All hits below come from btrfs's scrub code (fs/btrfs/scrub.c). The leading number is the line in that file, the trailing annotation names the enclosing function, and declarations are marked member, argument, or local.
95 struct scrub_ctx *sctx; member
116 struct scrub_ctx *sctx; member
133 struct scrub_ctx *sctx; member
209 struct scrub_ctx *sctx; member
225 struct scrub_ctx *sctx; member
243 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
244 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
245 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
246 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
271 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
273 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
285 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
291 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
293 static void scrub_wr_submit(struct scrub_ctx *sctx);
296 static int write_page_nocow(struct scrub_ctx *sctx,
300 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
305 static void scrub_put_ctx(struct scrub_ctx *sctx);
308 static void scrub_pending_bio_inc(struct scrub_ctx *sctx) in scrub_pending_bio_inc() argument
310 atomic_inc(&sctx->refs); in scrub_pending_bio_inc()
311 atomic_inc(&sctx->bios_in_flight); in scrub_pending_bio_inc()
314 static void scrub_pending_bio_dec(struct scrub_ctx *sctx) in scrub_pending_bio_dec() argument
316 atomic_dec(&sctx->bios_in_flight); in scrub_pending_bio_dec()
317 wake_up(&sctx->list_wait); in scrub_pending_bio_dec()
318 scrub_put_ctx(sctx); in scrub_pending_bio_dec()
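The hits at 308-318 expose this pair of helpers almost completely; a reconstruction, with only braces and blank lines restored:

	static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
	{
		atomic_inc(&sctx->refs);
		atomic_inc(&sctx->bios_in_flight);
	}

	static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
	{
		atomic_dec(&sctx->bios_in_flight);
		wake_up(&sctx->list_wait);	/* wake waiters on bios_in_flight == 0, cf. hits 3156-3157 */
		scrub_put_ctx(sctx);		/* drop the reference taken in _inc() */
	}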
357 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx) in scrub_pending_trans_workers_inc() argument
359 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_pending_trans_workers_inc()
361 atomic_inc(&sctx->refs); in scrub_pending_trans_workers_inc()
385 atomic_inc(&sctx->workers_pending); in scrub_pending_trans_workers_inc()
389 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx) in scrub_pending_trans_workers_dec() argument
391 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_pending_trans_workers_dec()
401 atomic_dec(&sctx->workers_pending); in scrub_pending_trans_workers_dec()
403 wake_up(&sctx->list_wait); in scrub_pending_trans_workers_dec()
404 scrub_put_ctx(sctx); in scrub_pending_trans_workers_dec()
407 static void scrub_free_csums(struct scrub_ctx *sctx) in scrub_free_csums() argument
409 while (!list_empty(&sctx->csum_list)) { in scrub_free_csums()
411 sum = list_first_entry(&sctx->csum_list, in scrub_free_csums()
418 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) in scrub_free_ctx() argument
422 if (!sctx) in scrub_free_ctx()
425 scrub_free_wr_ctx(&sctx->wr_ctx); in scrub_free_ctx()
428 if (sctx->curr != -1) { in scrub_free_ctx()
429 struct scrub_bio *sbio = sctx->bios[sctx->curr]; in scrub_free_ctx()
439 struct scrub_bio *sbio = sctx->bios[i]; in scrub_free_ctx()
446 scrub_free_csums(sctx); in scrub_free_ctx()
447 kfree(sctx); in scrub_free_ctx()
450 static void scrub_put_ctx(struct scrub_ctx *sctx) in scrub_put_ctx() argument
452 if (atomic_dec_and_test(&sctx->refs)) in scrub_put_ctx()
453 scrub_free_ctx(sctx); in scrub_put_ctx()
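scrub_put_ctx() is fully visible at 450-453; scrub_free_csums() is only partially shown at 407-411, so the entry type and the list_del()/kfree() in its loop body below are assumptions based on the usual list-draining pattern:

	/* Sketch; the btrfs_ordered_sum type and the list_del/kfree are assumed. */
	static void scrub_free_csums(struct scrub_ctx *sctx)
	{
		while (!list_empty(&sctx->csum_list)) {
			struct btrfs_ordered_sum *sum;

			sum = list_first_entry(&sctx->csum_list,
					       struct btrfs_ordered_sum, list);
			list_del(&sum->list);	/* assumed */
			kfree(sum);		/* assumed */
		}
	}

	/* Fully visible at 450-453: the last reference frees the context. */
	static void scrub_put_ctx(struct scrub_ctx *sctx)
	{
		if (atomic_dec_and_test(&sctx->refs))
			scrub_free_ctx(sctx);
	}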
459 struct scrub_ctx *sctx; in scrub_setup_ctx() local
464 sctx = kzalloc(sizeof(*sctx), GFP_NOFS); in scrub_setup_ctx()
465 if (!sctx) in scrub_setup_ctx()
467 atomic_set(&sctx->refs, 1); in scrub_setup_ctx()
468 sctx->is_dev_replace = is_dev_replace; in scrub_setup_ctx()
469 sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; in scrub_setup_ctx()
470 sctx->curr = -1; in scrub_setup_ctx()
471 sctx->dev_root = dev->dev_root; in scrub_setup_ctx()
478 sctx->bios[i] = sbio; in scrub_setup_ctx()
481 sbio->sctx = sctx; in scrub_setup_ctx()
487 sctx->bios[i]->next_free = i + 1; in scrub_setup_ctx()
489 sctx->bios[i]->next_free = -1; in scrub_setup_ctx()
491 sctx->first_free = 0; in scrub_setup_ctx()
492 sctx->nodesize = dev->dev_root->nodesize; in scrub_setup_ctx()
493 sctx->sectorsize = dev->dev_root->sectorsize; in scrub_setup_ctx()
494 atomic_set(&sctx->bios_in_flight, 0); in scrub_setup_ctx()
495 atomic_set(&sctx->workers_pending, 0); in scrub_setup_ctx()
496 atomic_set(&sctx->cancel_req, 0); in scrub_setup_ctx()
497 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy); in scrub_setup_ctx()
498 INIT_LIST_HEAD(&sctx->csum_list); in scrub_setup_ctx()
500 spin_lock_init(&sctx->list_lock); in scrub_setup_ctx()
501 spin_lock_init(&sctx->stat_lock); in scrub_setup_ctx()
502 init_waitqueue_head(&sctx->list_wait); in scrub_setup_ctx()
504 ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info, in scrub_setup_ctx()
507 scrub_free_ctx(sctx); in scrub_setup_ctx()
510 return sctx; in scrub_setup_ctx()
513 scrub_free_ctx(sctx); in scrub_setup_ctx()
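The hits at 459-513 outline the constructor well enough to sketch how the scrub_bio free list is chained up at setup time. In the condensed skeleton below, the array bound SCRUB_BIOS_PER_SCTX, the sbio->index assignment, the error convention, and the omitted field initializations are assumptions; the rest is taken from the hits:

	static noinline_for_stack
	struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
	{
		struct scrub_ctx *sctx;
		int i;

		sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
		if (!sctx)
			return ERR_PTR(-ENOMEM);	/* assumed error convention */
		atomic_set(&sctx->refs, 1);		/* dropped by scrub_put_ctx() */
		sctx->is_dev_replace = is_dev_replace;
		sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
		sctx->curr = -1;			/* no read bio being filled */
		sctx->dev_root = dev->dev_root;
		for (i = 0; i < SCRUB_BIOS_PER_SCTX; i++) {	/* bound name assumed */
			struct scrub_bio *sbio = kzalloc(sizeof(*sbio), GFP_NOFS);

			if (!sbio)
				goto nomem;
			sctx->bios[i] = sbio;
			sbio->index = i;	/* assumed; cf. hit 2369 */
			sbio->sctx = sctx;
			/* chain the bios into the initial free list */
			if (i != SCRUB_BIOS_PER_SCTX - 1)
				sctx->bios[i]->next_free = i + 1;
			else
				sctx->bios[i]->next_free = -1;
		}
		sctx->first_free = 0;
		spin_lock_init(&sctx->list_lock);
		spin_lock_init(&sctx->stat_lock);
		init_waitqueue_head(&sctx->list_wait);
		return sctx;

	nomem:
		scrub_free_ctx(sctx);
		return ERR_PTR(-ENOMEM);
	}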
619 fs_info = sblock->sctx->dev_root->fs_info; in scrub_print_warning()
793 struct scrub_ctx *sctx; in scrub_fixup_nodatasum() local
799 sctx = fixup->sctx; in scrub_fixup_nodatasum()
803 spin_lock(&sctx->stat_lock); in scrub_fixup_nodatasum()
804 ++sctx->stat.malloc_errors; in scrub_fixup_nodatasum()
805 spin_unlock(&sctx->stat_lock); in scrub_fixup_nodatasum()
834 spin_lock(&sctx->stat_lock); in scrub_fixup_nodatasum()
835 ++sctx->stat.corrected_errors; in scrub_fixup_nodatasum()
836 spin_unlock(&sctx->stat_lock); in scrub_fixup_nodatasum()
842 spin_lock(&sctx->stat_lock); in scrub_fixup_nodatasum()
843 ++sctx->stat.uncorrectable_errors; in scrub_fixup_nodatasum()
844 spin_unlock(&sctx->stat_lock); in scrub_fixup_nodatasum()
846 &sctx->dev_root->fs_info->dev_replace. in scrub_fixup_nodatasum()
848 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info, in scrub_fixup_nodatasum()
856 scrub_pending_trans_workers_dec(sctx); in scrub_fixup_nodatasum()
882 struct scrub_ctx *sctx = sblock_to_check->sctx; in scrub_handle_errored_block() local
900 fs_info = sctx->dev_root->fs_info; in scrub_handle_errored_block()
907 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
908 ++sctx->stat.super_errors; in scrub_handle_errored_block()
909 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
921 if (sctx->is_dev_replace && !is_metadata && !have_csum) { in scrub_handle_errored_block()
958 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
959 sctx->stat.malloc_errors++; in scrub_handle_errored_block()
960 sctx->stat.read_errors++; in scrub_handle_errored_block()
961 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
962 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
970 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
971 sctx->stat.read_errors++; in scrub_handle_errored_block()
972 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
973 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
993 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
994 sctx->stat.unverified_errors++; in scrub_handle_errored_block()
996 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
998 if (sctx->is_dev_replace) in scrub_handle_errored_block()
1004 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1005 sctx->stat.read_errors++; in scrub_handle_errored_block()
1006 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1011 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1012 sctx->stat.csum_errors++; in scrub_handle_errored_block()
1013 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1019 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1020 sctx->stat.verify_errors++; in scrub_handle_errored_block()
1021 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1033 if (sctx->readonly) { in scrub_handle_errored_block()
1034 ASSERT(!sctx->is_dev_replace); in scrub_handle_errored_block()
1041 WARN_ON(sctx->is_dev_replace); in scrub_handle_errored_block()
1055 fixup_nodatasum->sctx = sctx; in scrub_handle_errored_block()
1060 scrub_pending_trans_workers_inc(sctx); in scrub_handle_errored_block()
1099 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1111 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) in scrub_handle_errored_block()
1145 if (!page_bad->io_error && !sctx->is_dev_replace) in scrub_handle_errored_block()
1165 if (sctx->is_dev_replace) { in scrub_handle_errored_block()
1179 &sctx->dev_root-> in scrub_handle_errored_block()
1195 if (success && !sctx->is_dev_replace) { in scrub_handle_errored_block()
1215 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1216 sctx->stat.corrected_errors++; in scrub_handle_errored_block()
1218 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1225 spin_lock(&sctx->stat_lock); in scrub_handle_errored_block()
1226 sctx->stat.uncorrectable_errors++; in scrub_handle_errored_block()
1227 spin_unlock(&sctx->stat_lock); in scrub_handle_errored_block()
1303 struct scrub_ctx *sctx = original_sblock->sctx; in scrub_setup_recheck_block() local
1304 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_setup_recheck_block()
1363 sblock->sctx = sctx; in scrub_setup_recheck_block()
1368 spin_lock(&sctx->stat_lock); in scrub_setup_recheck_block()
1369 sctx->stat.malloc_errors++; in scrub_setup_recheck_block()
1370 spin_unlock(&sctx->stat_lock); in scrub_setup_recheck_block()
1384 sctx->csum_size); in scrub_setup_recheck_block()
1577 btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info, in scrub_repair_page_from_good_copy()
1599 &sblock_bad->sctx->dev_root->fs_info-> in scrub_repair_page_from_good_copy()
1627 &sblock->sctx->dev_root->fs_info->dev_replace. in scrub_write_block_to_dev_replace()
1645 return scrub_add_page_to_wr_bio(sblock->sctx, spage); in scrub_write_page_to_dev_replace()
1648 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, in scrub_add_page_to_wr_bio() argument
1651 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx; in scrub_add_page_to_wr_bio()
1664 wr_ctx->wr_curr_bio->sctx = sctx; in scrub_add_page_to_wr_bio()
1693 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1705 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1713 scrub_wr_submit(sctx); in scrub_add_page_to_wr_bio()
1719 static void scrub_wr_submit(struct scrub_ctx *sctx) in scrub_wr_submit() argument
1721 struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx; in scrub_wr_submit()
1730 scrub_pending_bio_inc(sctx); in scrub_wr_submit()
1754 struct scrub_ctx *sctx = sbio->sctx; in scrub_wr_bio_end_io_worker() local
1760 &sbio->sctx->dev_root->fs_info->dev_replace; in scrub_wr_bio_end_io_worker()
1776 scrub_pending_bio_dec(sctx); in scrub_wr_bio_end_io_worker()
1815 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_data() local
1832 len = sctx->sectorsize; in scrub_checksum_data()
1850 if (memcmp(csum, on_disk_csum, sctx->csum_size)) in scrub_checksum_data()
1858 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_tree_block() local
1860 struct btrfs_root *root = sctx->dev_root; in scrub_checksum_tree_block()
1876 memcpy(on_disk_csum, h->csum, sctx->csum_size); in scrub_checksum_tree_block()
1898 len = sctx->nodesize - BTRFS_CSUM_SIZE; in scrub_checksum_tree_block()
1920 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) in scrub_checksum_tree_block()
1929 struct scrub_ctx *sctx = sblock->sctx; in scrub_checksum_super() local
1946 memcpy(on_disk_csum, s->csum, sctx->csum_size); in scrub_checksum_super()
1979 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size)) in scrub_checksum_super()
1988 spin_lock(&sctx->stat_lock); in scrub_checksum_super()
1989 ++sctx->stat.super_errors; in scrub_checksum_super()
1990 spin_unlock(&sctx->stat_lock); in scrub_checksum_super()
2035 static void scrub_submit(struct scrub_ctx *sctx) in scrub_submit() argument
2039 if (sctx->curr == -1) in scrub_submit()
2042 sbio = sctx->bios[sctx->curr]; in scrub_submit()
2043 sctx->curr = -1; in scrub_submit()
2044 scrub_pending_bio_inc(sctx); in scrub_submit()
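The hits at 2035-2044 show everything in scrub_submit() except the final submission call, which is therefore only a comment in this sketch:

	static void scrub_submit(struct scrub_ctx *sctx)
	{
		struct scrub_bio *sbio;

		if (sctx->curr == -1)		/* nothing batched */
			return;

		sbio = sctx->bios[sctx->curr];
		sctx->curr = -1;		/* slot returns to the pool on completion */
		scrub_pending_bio_inc(sctx);	/* paired with _dec in the end_io worker */
		/* submit sbio->bio to the block layer (call not shown in the hits) */
	}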
2048 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, in scrub_add_page_to_rd_bio() argument
2059 while (sctx->curr == -1) { in scrub_add_page_to_rd_bio()
2060 spin_lock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
2061 sctx->curr = sctx->first_free; in scrub_add_page_to_rd_bio()
2062 if (sctx->curr != -1) { in scrub_add_page_to_rd_bio()
2063 sctx->first_free = sctx->bios[sctx->curr]->next_free; in scrub_add_page_to_rd_bio()
2064 sctx->bios[sctx->curr]->next_free = -1; in scrub_add_page_to_rd_bio()
2065 sctx->bios[sctx->curr]->page_count = 0; in scrub_add_page_to_rd_bio()
2066 spin_unlock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
2068 spin_unlock(&sctx->list_lock); in scrub_add_page_to_rd_bio()
2069 wait_event(sctx->list_wait, sctx->first_free != -1); in scrub_add_page_to_rd_bio()
2072 sbio = sctx->bios[sctx->curr]; in scrub_add_page_to_rd_bio()
2081 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); in scrub_add_page_to_rd_bio()
2097 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
2109 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
2116 if (sbio->page_count == sctx->pages_per_rd_bio) in scrub_add_page_to_rd_bio()
2117 scrub_submit(sctx); in scrub_add_page_to_rd_bio()
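The allocation loop of scrub_add_page_to_rd_bio() is fully visible at 2059-2069: pop a scrub_bio off the lock-protected free list, or sleep on list_wait until a completing bio returns one (the end_io side pushes it back at 2367-2370):

	while (sctx->curr == -1) {
		spin_lock(&sctx->list_lock);
		sctx->curr = sctx->first_free;
		if (sctx->curr != -1) {
			sctx->first_free = sctx->bios[sctx->curr]->next_free;
			sctx->bios[sctx->curr]->next_free = -1;
			sctx->bios[sctx->curr]->page_count = 0;
			spin_unlock(&sctx->list_lock);
		} else {
			spin_unlock(&sctx->list_lock);
			wait_event(sctx->list_wait, sctx->first_free != -1);
		}
	}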
2125 struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info; in scrub_missing_raid56_end_io()
2136 struct scrub_ctx *sctx = sblock->sctx; in scrub_missing_raid56_worker() local
2147 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2148 sctx->stat.read_errors++; in scrub_missing_raid56_worker()
2149 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2150 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info, in scrub_missing_raid56_worker()
2154 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2155 sctx->stat.uncorrectable_errors++; in scrub_missing_raid56_worker()
2156 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_worker()
2157 btrfs_err_rl_in_rcu(sctx->dev_root->fs_info, in scrub_missing_raid56_worker()
2166 if (sctx->is_dev_replace && in scrub_missing_raid56_worker()
2167 atomic_read(&sctx->wr_ctx.flush_all_writes)) { in scrub_missing_raid56_worker()
2168 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_missing_raid56_worker()
2169 scrub_wr_submit(sctx); in scrub_missing_raid56_worker()
2170 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_missing_raid56_worker()
2173 scrub_pending_bio_dec(sctx); in scrub_missing_raid56_worker()
2178 struct scrub_ctx *sctx = sblock->sctx; in scrub_missing_raid56_pages() local
2179 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_missing_raid56_pages()
2193 if (WARN_ON(!sctx->is_dev_replace || in scrub_missing_raid56_pages()
2212 rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length); in scrub_missing_raid56_pages()
2225 scrub_pending_bio_inc(sctx); in scrub_missing_raid56_pages()
2233 spin_lock(&sctx->stat_lock); in scrub_missing_raid56_pages()
2234 sctx->stat.malloc_errors++; in scrub_missing_raid56_pages()
2235 spin_unlock(&sctx->stat_lock); in scrub_missing_raid56_pages()
2238 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, in scrub_pages() argument
2248 spin_lock(&sctx->stat_lock); in scrub_pages()
2249 sctx->stat.malloc_errors++; in scrub_pages()
2250 spin_unlock(&sctx->stat_lock); in scrub_pages()
2257 sblock->sctx = sctx; in scrub_pages()
2267 spin_lock(&sctx->stat_lock); in scrub_pages()
2268 sctx->stat.malloc_errors++; in scrub_pages()
2269 spin_unlock(&sctx->stat_lock); in scrub_pages()
2286 memcpy(spage->csum, csum, sctx->csum_size); in scrub_pages()
2312 ret = scrub_add_page_to_rd_bio(sctx, spage); in scrub_pages()
2320 scrub_submit(sctx); in scrub_pages()
2342 struct scrub_ctx *sctx = sbio->sctx; in scrub_bio_end_io_worker() local
2367 spin_lock(&sctx->list_lock); in scrub_bio_end_io_worker()
2368 sbio->next_free = sctx->first_free; in scrub_bio_end_io_worker()
2369 sctx->first_free = sbio->index; in scrub_bio_end_io_worker()
2370 spin_unlock(&sctx->list_lock); in scrub_bio_end_io_worker()
2372 if (sctx->is_dev_replace && in scrub_bio_end_io_worker()
2373 atomic_read(&sctx->wr_ctx.flush_all_writes)) { in scrub_bio_end_io_worker()
2374 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_bio_end_io_worker()
2375 scrub_wr_submit(sctx); in scrub_bio_end_io_worker()
2376 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_bio_end_io_worker()
2379 scrub_pending_bio_dec(sctx); in scrub_bio_end_io_worker()
2388 int sectorsize = sparity->sctx->dev_root->sectorsize; in __scrub_mark_bitmap()
2435 if (!corrupted && sblock->sctx->is_dev_replace) in scrub_block_complete()
2449 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum) in scrub_find_csum() argument
2455 while (!list_empty(&sctx->csum_list)) { in scrub_find_csum()
2456 sum = list_first_entry(&sctx->csum_list, in scrub_find_csum()
2463 ++sctx->stat.csum_discards; in scrub_find_csum()
2471 index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize; in scrub_find_csum()
2472 num_sectors = sum->len / sctx->sectorsize; in scrub_find_csum()
2473 memcpy(csum, sum->sums + index, sctx->csum_size); in scrub_find_csum()
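The hits at 2449-2473 give the shape of the checksum lookup: walk csum_list from the head, discarding stale btrfs_ordered_sum items (counted in csum_discards), then index into the item that covers the logical address. In the sketch below the two skip/match conditions and the list_del/kfree are assumptions; the index arithmetic and the memcpy are verbatim from the hits:

	static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
	{
		struct btrfs_ordered_sum *sum = NULL;
		unsigned long index, num_sectors;

		while (!list_empty(&sctx->csum_list)) {
			sum = list_first_entry(&sctx->csum_list,
					       struct btrfs_ordered_sum, list);
			if (sum->bytenr > logical)
				return 0;		/* assumed: item starts past us */
			if (sum->bytenr + sum->len > logical)
				break;			/* assumed: item covers logical */
			++sctx->stat.csum_discards;	/* stale item, drop it */
			list_del(&sum->list);		/* assumed */
			kfree(sum);			/* assumed */
			sum = NULL;
		}
		if (!sum)
			return 0;
		index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
		num_sectors = sum->len / sctx->sectorsize;
		memcpy(csum, sum->sums + index, sctx->csum_size);
		return 1;
	}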
2482 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len, in scrub_extent() argument
2491 blocksize = sctx->sectorsize; in scrub_extent()
2492 spin_lock(&sctx->stat_lock); in scrub_extent()
2493 sctx->stat.data_extents_scrubbed++; in scrub_extent()
2494 sctx->stat.data_bytes_scrubbed += len; in scrub_extent()
2495 spin_unlock(&sctx->stat_lock); in scrub_extent()
2497 blocksize = sctx->nodesize; in scrub_extent()
2498 spin_lock(&sctx->stat_lock); in scrub_extent()
2499 sctx->stat.tree_extents_scrubbed++; in scrub_extent()
2500 sctx->stat.tree_bytes_scrubbed += len; in scrub_extent()
2501 spin_unlock(&sctx->stat_lock); in scrub_extent()
2503 blocksize = sctx->sectorsize; in scrub_extent()
2513 have_csum = scrub_find_csum(sctx, logical, csum); in scrub_extent()
2515 ++sctx->stat.no_csum; in scrub_extent()
2516 if (sctx->is_dev_replace && !have_csum) { in scrub_extent()
2517 ret = copy_nocow_pages(sctx, logical, l, in scrub_extent()
2523 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen, in scrub_extent()
2542 struct scrub_ctx *sctx = sparity->sctx; in scrub_pages_for_parity() local
2548 spin_lock(&sctx->stat_lock); in scrub_pages_for_parity()
2549 sctx->stat.malloc_errors++; in scrub_pages_for_parity()
2550 spin_unlock(&sctx->stat_lock); in scrub_pages_for_parity()
2557 sblock->sctx = sctx; in scrub_pages_for_parity()
2569 spin_lock(&sctx->stat_lock); in scrub_pages_for_parity()
2570 sctx->stat.malloc_errors++; in scrub_pages_for_parity()
2571 spin_unlock(&sctx->stat_lock); in scrub_pages_for_parity()
2591 memcpy(spage->csum, csum, sctx->csum_size); in scrub_pages_for_parity()
2609 ret = scrub_add_page_to_rd_bio(sctx, spage); in scrub_pages_for_parity()
2626 struct scrub_ctx *sctx = sparity->sctx; in scrub_extent_for_parity() local
2637 blocksize = sctx->sectorsize; in scrub_extent_for_parity()
2639 blocksize = sctx->nodesize; in scrub_extent_for_parity()
2641 blocksize = sctx->sectorsize; in scrub_extent_for_parity()
2651 have_csum = scrub_find_csum(sctx, logical, csum); in scrub_extent_for_parity()
2714 struct scrub_ctx *sctx = sparity->sctx; in scrub_free_parity() local
2720 spin_lock(&sctx->stat_lock); in scrub_free_parity()
2721 sctx->stat.read_errors += nbits; in scrub_free_parity()
2722 sctx->stat.uncorrectable_errors += nbits; in scrub_free_parity()
2723 spin_unlock(&sctx->stat_lock); in scrub_free_parity()
2738 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_bio_endio_worker() local
2741 scrub_pending_bio_dec(sctx); in scrub_parity_bio_endio_worker()
2756 btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers, in scrub_parity_bio_endio()
2762 struct scrub_ctx *sctx = sparity->sctx; in scrub_parity_check_and_repair() local
2775 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, in scrub_parity_check_and_repair()
2789 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, in scrub_parity_check_and_repair()
2799 scrub_pending_bio_inc(sctx); in scrub_parity_check_and_repair()
2809 spin_lock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2810 sctx->stat.malloc_errors++; in scrub_parity_check_and_repair()
2811 spin_unlock(&sctx->stat_lock); in scrub_parity_check_and_repair()
2834 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, in scrub_raid56_parity() argument
2841 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_raid56_parity()
2868 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
2869 sctx->stat.malloc_errors++; in scrub_raid56_parity()
2870 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
2876 sparity->sctx = sctx; in scrub_raid56_parity()
2960 spin_lock(&sctx->stat_lock); in scrub_raid56_parity()
2961 sctx->stat.uncorrectable_errors++; in scrub_raid56_parity()
2962 spin_unlock(&sctx->stat_lock); in scrub_raid56_parity()
3001 &sctx->csum_list, 1); in scrub_raid56_parity()
3012 scrub_free_csums(sctx); in scrub_raid56_parity()
3047 scrub_submit(sctx); in scrub_raid56_parity()
3048 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_raid56_parity()
3049 scrub_wr_submit(sctx); in scrub_raid56_parity()
3050 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_raid56_parity()
3056 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, in scrub_stripe() argument
3063 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in scrub_stripe()
3156 wait_event(sctx->list_wait, in scrub_stripe()
3157 atomic_read(&sctx->bios_in_flight) == 0); in scrub_stripe()
3198 atomic_read(&sctx->cancel_req)) { in scrub_stripe()
3207 atomic_set(&sctx->wr_ctx.flush_all_writes, 1); in scrub_stripe()
3208 scrub_submit(sctx); in scrub_stripe()
3209 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_stripe()
3210 scrub_wr_submit(sctx); in scrub_stripe()
3211 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_stripe()
3212 wait_event(sctx->list_wait, in scrub_stripe()
3213 atomic_read(&sctx->bios_in_flight) == 0); in scrub_stripe()
3214 atomic_set(&sctx->wr_ctx.flush_all_writes, 0); in scrub_stripe()
3227 ret = scrub_raid56_parity(sctx, map, scrub_dev, in scrub_stripe()
3312 spin_lock(&sctx->stat_lock); in scrub_stripe()
3313 sctx->stat.uncorrectable_errors++; in scrub_stripe()
3314 spin_unlock(&sctx->stat_lock); in scrub_stripe()
3348 &sctx->csum_list, 1); in scrub_stripe()
3352 ret = scrub_extent(sctx, extent_logical, extent_len, in scrub_stripe()
3357 scrub_free_csums(sctx); in scrub_stripe()
3380 ret = scrub_raid56_parity(sctx, in scrub_stripe()
3409 spin_lock(&sctx->stat_lock); in scrub_stripe()
3411 sctx->stat.last_physical = map->stripes[num].physical + in scrub_stripe()
3414 sctx->stat.last_physical = physical; in scrub_stripe()
3415 spin_unlock(&sctx->stat_lock); in scrub_stripe()
3421 scrub_submit(sctx); in scrub_stripe()
3422 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_stripe()
3423 scrub_wr_submit(sctx); in scrub_stripe()
3424 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_stripe()
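The dev-replace drain sequence visible at 3207-3214 inside scrub_stripe() (and repeated at 3619-3637 in scrub_enumerate_chunks()) is self-contained enough to lift out; every line below appears in the hits:

	/* Force all queued I/O out and wait for it before proceeding. */
	atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
	scrub_submit(sctx);			/* flush the half-filled read bio */
	mutex_lock(&sctx->wr_ctx.wr_lock);
	scrub_wr_submit(sctx);			/* flush the half-filled write bio */
	mutex_unlock(&sctx->wr_ctx.wr_lock);
	wait_event(sctx->list_wait,
		   atomic_read(&sctx->bios_in_flight) == 0);
	atomic_set(&sctx->wr_ctx.flush_all_writes, 0);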
3432 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx, in scrub_chunk() argument
3440 &sctx->dev_root->fs_info->mapping_tree; in scrub_chunk()
3473 ret = scrub_stripe(sctx, map, scrub_dev, i, in scrub_chunk()
3487 int scrub_enumerate_chunks(struct scrub_ctx *sctx, in scrub_enumerate_chunks() argument
3493 struct btrfs_root *root = sctx->dev_root; in scrub_enumerate_chunks()
3606 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, in scrub_enumerate_chunks()
3619 atomic_set(&sctx->wr_ctx.flush_all_writes, 1); in scrub_enumerate_chunks()
3620 scrub_submit(sctx); in scrub_enumerate_chunks()
3621 mutex_lock(&sctx->wr_ctx.wr_lock); in scrub_enumerate_chunks()
3622 scrub_wr_submit(sctx); in scrub_enumerate_chunks()
3623 mutex_unlock(&sctx->wr_ctx.wr_lock); in scrub_enumerate_chunks()
3625 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
3626 atomic_read(&sctx->bios_in_flight) == 0); in scrub_enumerate_chunks()
3635 wait_event(sctx->list_wait, in scrub_enumerate_chunks()
3636 atomic_read(&sctx->workers_pending) == 0); in scrub_enumerate_chunks()
3637 atomic_set(&sctx->wr_ctx.flush_all_writes, 0); in scrub_enumerate_chunks()
3674 if (sctx->stat.malloc_errors > 0) { in scrub_enumerate_chunks()
3691 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx, in scrub_supers() argument
3698 struct btrfs_root *root = sctx->dev_root; in scrub_supers()
3715 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr, in scrub_supers()
3721 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in scrub_supers()
3791 struct scrub_ctx *sctx; in btrfs_scrub_dev() local
3880 sctx = scrub_setup_ctx(dev, is_dev_replace); in btrfs_scrub_dev()
3881 if (IS_ERR(sctx)) { in btrfs_scrub_dev()
3885 return PTR_ERR(sctx); in btrfs_scrub_dev()
3887 sctx->readonly = readonly; in btrfs_scrub_dev()
3888 dev->scrub_device = sctx; in btrfs_scrub_dev()
3905 ret = scrub_supers(sctx, dev); in btrfs_scrub_dev()
3910 ret = scrub_enumerate_chunks(sctx, dev, start, end, in btrfs_scrub_dev()
3913 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0); in btrfs_scrub_dev()
3917 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0); in btrfs_scrub_dev()
3920 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_dev()
3927 scrub_put_ctx(sctx); in btrfs_scrub_dev()
3981 struct scrub_ctx *sctx; in btrfs_scrub_cancel_dev() local
3984 sctx = dev->scrub_device; in btrfs_scrub_cancel_dev()
3985 if (!sctx) { in btrfs_scrub_cancel_dev()
3989 atomic_inc(&sctx->cancel_req); in btrfs_scrub_cancel_dev()
4005 struct scrub_ctx *sctx = NULL; in btrfs_scrub_progress() local
4010 sctx = dev->scrub_device; in btrfs_scrub_progress()
4011 if (sctx) in btrfs_scrub_progress()
4012 memcpy(progress, &sctx->stat, sizeof(*progress)); in btrfs_scrub_progress()
4015 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV; in btrfs_scrub_progress()
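The hits at 4005-4015 show the core of btrfs_scrub_progress(): copy the live stat block out if a scrub is running, and encode "no device" vs. "no scrub" in the return value. In this sketch the signature, the btrfs_find_device() lookup, and the omitted locking are assumptions; the memcpy and the return expression are verbatim:

	int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
				 struct btrfs_scrub_progress *progress)
	{
		struct btrfs_device *dev;
		struct scrub_ctx *sctx = NULL;

		/* assumed: device lookup, done under the appropriate lock */
		dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
		if (dev)
			sctx = dev->scrub_device;
		if (sctx)
			memcpy(progress, &sctx->stat, sizeof(*progress));

		return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
	}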
4043 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx, in scrub_setup_wr_ctx() argument
4071 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, in copy_nocow_pages() argument
4075 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; in copy_nocow_pages()
4079 spin_lock(&sctx->stat_lock); in copy_nocow_pages()
4080 sctx->stat.malloc_errors++; in copy_nocow_pages()
4081 spin_unlock(&sctx->stat_lock); in copy_nocow_pages()
4085 scrub_pending_trans_workers_inc(sctx); in copy_nocow_pages()
4087 nocow_ctx->sctx = sctx; in copy_nocow_pages()
4122 struct scrub_ctx *sctx = nocow_ctx->sctx; in copy_nocow_pages_worker() local
4134 fs_info = sctx->dev_root->fs_info; in copy_nocow_pages_worker()
4139 spin_lock(&sctx->stat_lock); in copy_nocow_pages_worker()
4140 sctx->stat.malloc_errors++; in copy_nocow_pages_worker()
4141 spin_unlock(&sctx->stat_lock); in copy_nocow_pages_worker()
4199 scrub_pending_trans_workers_dec(sctx); in copy_nocow_pages_worker()
4249 struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info; in copy_nocow_pages_for_inode()
4345 err = write_page_nocow(nocow_ctx->sctx, in copy_nocow_pages_for_inode()
4368 static int write_page_nocow(struct scrub_ctx *sctx, in write_page_nocow() argument
4375 dev = sctx->wr_ctx.tgtdev; in write_page_nocow()
4385 spin_lock(&sctx->stat_lock); in write_page_nocow()
4386 sctx->stat.malloc_errors++; in write_page_nocow()
4387 spin_unlock(&sctx->stat_lock); in write_page_nocow()