rbio 37 drivers/md/raid1-10.c static void rbio_pool_free(void *rbio, void *data)
rbio 39 drivers/md/raid1-10.c kfree(rbio);
rbio 164 drivers/md/raid10.c struct bio *rbio = r10_bio->devs[j].repl_bio;
rbio 168 drivers/md/raid10.c if (rbio)
rbio 184 drivers/md/raid10.c if (rbio) {
rbio 186 drivers/md/raid10.c rbio->bi_private = rp_repl;
rbio 177 fs/btrfs/raid56.c static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
rbio 178 fs/btrfs/raid56.c static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
rbio 181 fs/btrfs/raid56.c static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
rbio 182 fs/btrfs/raid56.c static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
rbio 183 fs/btrfs/raid56.c static void __free_raid_bio(struct btrfs_raid_bio *rbio);
rbio 184 fs/btrfs/raid56.c static void index_rbio_pages(struct btrfs_raid_bio *rbio);
rbio 185 fs/btrfs/raid56.c static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
rbio 187 fs/btrfs/raid56.c static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
rbio 191 fs/btrfs/raid56.c static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
rbio 193 fs/btrfs/raid56.c btrfs_init_work(&rbio->work, work_func, NULL, NULL);
rbio 194 fs/btrfs/raid56.c btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
rbio 252 fs/btrfs/raid56.c static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
rbio 259 fs/btrfs/raid56.c ret = alloc_rbio_pages(rbio);
rbio 263 fs/btrfs/raid56.c for (i = 0; i < rbio->nr_pages; i++) {
rbio 264 fs/btrfs/raid56.c if (!rbio->bio_pages[i])
rbio 267 fs/btrfs/raid56.c s = kmap(rbio->bio_pages[i]);
rbio 268 fs/btrfs/raid56.c d = kmap(rbio->stripe_pages[i]);
rbio 272 fs/btrfs/raid56.c kunmap(rbio->bio_pages[i]);
rbio 273 fs/btrfs/raid56.c kunmap(rbio->stripe_pages[i]);
rbio 274 fs/btrfs/raid56.c SetPageUptodate(rbio->stripe_pages[i]);
rbio 276 fs/btrfs/raid56.c set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
rbio 282 fs/btrfs/raid56.c static int rbio_bucket(struct btrfs_raid_bio *rbio)
rbio 284 fs/btrfs/raid56.c u64 num = rbio->bbio->raid_map[0];
rbio 345 fs/btrfs/raid56.c static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
rbio 347 fs/btrfs/raid56.c int bucket = rbio_bucket(rbio);
rbio 355 fs/btrfs/raid56.c if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
rbio 358 fs/btrfs/raid56.c table = rbio->fs_info->stripe_hash_table;
rbio 370 fs/btrfs/raid56.c spin_lock(&rbio->bio_list_lock);
rbio 372 fs/btrfs/raid56.c if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
rbio 373 fs/btrfs/raid56.c list_del_init(&rbio->stripe_cache);
rbio 386 fs/btrfs/raid56.c if (bio_list_empty(&rbio->bio_list)) {
rbio 387 fs/btrfs/raid56.c if (!list_empty(&rbio->hash_list)) {
rbio 388 fs/btrfs/raid56.c list_del_init(&rbio->hash_list);
rbio 389 fs/btrfs/raid56.c refcount_dec(&rbio->refs);
rbio 390 fs/btrfs/raid56.c BUG_ON(!list_empty(&rbio->plug_list));
rbio 395 fs/btrfs/raid56.c spin_unlock(&rbio->bio_list_lock);
rbio 399 fs/btrfs/raid56.c __free_raid_bio(rbio);
rbio 405 fs/btrfs/raid56.c static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
rbio 410 fs/btrfs/raid56.c if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
rbio 413 fs/btrfs/raid56.c table = rbio->fs_info->stripe_hash_table;
rbio 416 fs/btrfs/raid56.c __remove_rbio_from_cache(rbio);
rbio 427 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 433 fs/btrfs/raid56.c rbio = list_entry(table->stripe_cache.next,
rbio 436 fs/btrfs/raid56.c __remove_rbio_from_cache(rbio);
rbio 465 fs/btrfs/raid56.c static void cache_rbio(struct btrfs_raid_bio *rbio)
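The entries from rbio_bucket() through the cache-removal helpers above show a stripe hash table keyed on the stripe's first logical address (bbio->raid_map[0]). A minimal userspace sketch of that bucketing idea follows; STRIPE_TABLE_BITS and the multiplicative hash constant are illustrative assumptions, not the kernel's actual values.

#include <stdint.h>
#include <stdio.h>

#define STRIPE_TABLE_BITS 8                    /* assumed table size */
#define NUM_BUCKETS (1u << STRIPE_TABLE_BITS)

static unsigned int bucket_for(uint64_t first_logical)
{
	/* Knuth-style 64-bit multiplicative hash, top bits as index */
	uint64_t h = first_logical * 0x61c8864680b583ebULL;
	return (unsigned int)(h >> (64 - STRIPE_TABLE_BITS));
}

int main(void)
{
	uint64_t raid_map0 = 64ULL << 20;	/* hypothetical stripe start */
	printf("bucket = %u of %u\n", bucket_for(raid_map0), NUM_BUCKETS);
	return 0;
}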
rbio 470 fs/btrfs/raid56.c if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
rbio 473 fs/btrfs/raid56.c table = rbio->fs_info->stripe_hash_table;
rbio 476 fs/btrfs/raid56.c spin_lock(&rbio->bio_list_lock);
rbio 479 fs/btrfs/raid56.c if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
rbio 480 fs/btrfs/raid56.c refcount_inc(&rbio->refs);
rbio 482 fs/btrfs/raid56.c if (!list_empty(&rbio->stripe_cache)){
rbio 483 fs/btrfs/raid56.c list_move(&rbio->stripe_cache, &table->stripe_cache);
rbio 485 fs/btrfs/raid56.c list_add(&rbio->stripe_cache, &table->stripe_cache);
rbio 489 fs/btrfs/raid56.c spin_unlock(&rbio->bio_list_lock);
rbio 498 fs/btrfs/raid56.c if (found != rbio)
rbio 529 fs/btrfs/raid56.c static int rbio_is_full(struct btrfs_raid_bio *rbio)
rbio 532 fs/btrfs/raid56.c unsigned long size = rbio->bio_list_bytes;
rbio 535 fs/btrfs/raid56.c spin_lock_irqsave(&rbio->bio_list_lock, flags);
rbio 536 fs/btrfs/raid56.c if (size != rbio->nr_data * rbio->stripe_len)
rbio 538 fs/btrfs/raid56.c BUG_ON(size > rbio->nr_data * rbio->stripe_len);
rbio 539 fs/btrfs/raid56.c spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
rbio 615 fs/btrfs/raid56.c static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
rbio 618 fs/btrfs/raid56.c return stripe * rbio->stripe_npages + index;
rbio 625 fs/btrfs/raid56.c static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
rbio 628 fs/btrfs/raid56.c return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
rbio 634 fs/btrfs/raid56.c static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
rbio 636 fs/btrfs/raid56.c return rbio_stripe_page(rbio, rbio->nr_data, index);
rbio 643 fs/btrfs/raid56.c static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
rbio 645 fs/btrfs/raid56.c if (rbio->nr_data + 1 == rbio->real_stripes)
rbio 647 fs/btrfs/raid56.c return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
rbio 672 fs/btrfs/raid56.c static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
rbio 674 fs/btrfs/raid56.c int bucket = rbio_bucket(rbio);
rbio 675 fs/btrfs/raid56.c struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
rbio 685 fs/btrfs/raid56.c if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
rbio 696 fs/btrfs/raid56.c steal_rbio(cur, rbio);
rbio 704 fs/btrfs/raid56.c if (rbio_can_merge(cur, rbio)) {
rbio 705 fs/btrfs/raid56.c merge_rbio(cur, rbio);
rbio 707 fs/btrfs/raid56.c freeit = rbio;
rbio 723 fs/btrfs/raid56.c if (rbio_can_merge(pending, rbio)) {
rbio 724 fs/btrfs/raid56.c merge_rbio(pending, rbio);
rbio 726 fs/btrfs/raid56.c freeit = rbio;
rbio 736 fs/btrfs/raid56.c list_add_tail(&rbio->plug_list, &cur->plug_list);
rbio 743 fs/btrfs/raid56.c refcount_inc(&rbio->refs);
rbio 744 fs/btrfs/raid56.c list_add(&rbio->hash_list, &h->hash_list);
rbio 758 fs/btrfs/raid56.c static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
rbio 765 fs/btrfs/raid56.c bucket = rbio_bucket(rbio);
rbio 766 fs/btrfs/raid56.c h = rbio->fs_info->stripe_hash_table->table + bucket;
rbio 768 fs/btrfs/raid56.c if (list_empty(&rbio->plug_list))
rbio 769 fs/btrfs/raid56.c cache_rbio(rbio);
rbio 772 fs/btrfs/raid56.c spin_lock(&rbio->bio_list_lock);
rbio 774 fs/btrfs/raid56.c if (!list_empty(&rbio->hash_list)) {
rbio 780 fs/btrfs/raid56.c if (list_empty(&rbio->plug_list) &&
rbio 781 fs/btrfs/raid56.c test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
rbio 783 fs/btrfs/raid56.c clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
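The rbio_stripe_page_index() family above addresses one flat page array: stripe_npages pages per stripe, with the p and q parity stripes placed directly after the nr_data data stripes. A userspace sketch of that math, assuming a simplified stand-in struct with the field names from the listing:

#include <stdio.h>

struct fake_rbio {
	int nr_data;        /* number of data stripes */
	int real_stripes;   /* data + parity stripes */
	int stripe_npages;  /* pages per stripe */
};

static int stripe_page_index(const struct fake_rbio *r, int stripe, int index)
{
	return stripe * r->stripe_npages + index;
}

static int pstripe_page_index(const struct fake_rbio *r, int index)
{
	return stripe_page_index(r, r->nr_data, index);
}

/* returns -1 when the layout is RAID5 and has no q stripe */
static int qstripe_page_index(const struct fake_rbio *r, int index)
{
	if (r->nr_data + 1 == r->real_stripes)
		return -1;
	return stripe_page_index(r, r->nr_data + 1, index);
}

int main(void)
{
	struct fake_rbio r = { .nr_data = 2, .real_stripes = 4, .stripe_npages = 16 };

	printf("data1 page 3 -> %d\n", stripe_page_index(&r, 1, 3)); /* 19 */
	printf("p page 0     -> %d\n", pstripe_page_index(&r, 0));   /* 32 */
	printf("q page 0     -> %d\n", qstripe_page_index(&r, 0));   /* 48 */
	return 0;
}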
rbio 784 fs/btrfs/raid56.c BUG_ON(!bio_list_empty(&rbio->bio_list));
rbio 788 fs/btrfs/raid56.c list_del_init(&rbio->hash_list);
rbio 789 fs/btrfs/raid56.c refcount_dec(&rbio->refs);
rbio 796 fs/btrfs/raid56.c if (!list_empty(&rbio->plug_list)) {
rbio 798 fs/btrfs/raid56.c struct list_head *head = rbio->plug_list.next;
rbio 803 fs/btrfs/raid56.c list_del_init(&rbio->plug_list);
rbio 807 fs/btrfs/raid56.c spin_unlock(&rbio->bio_list_lock);
rbio 813 fs/btrfs/raid56.c steal_rbio(rbio, next);
rbio 816 fs/btrfs/raid56.c steal_rbio(rbio, next);
rbio 819 fs/btrfs/raid56.c steal_rbio(rbio, next);
rbio 827 fs/btrfs/raid56.c spin_unlock(&rbio->bio_list_lock);
rbio 832 fs/btrfs/raid56.c remove_rbio_from_cache(rbio);
rbio 835 fs/btrfs/raid56.c static void __free_raid_bio(struct btrfs_raid_bio *rbio)
rbio 839 fs/btrfs/raid56.c if (!refcount_dec_and_test(&rbio->refs))
rbio 842 fs/btrfs/raid56.c WARN_ON(!list_empty(&rbio->stripe_cache));
rbio 843 fs/btrfs/raid56.c WARN_ON(!list_empty(&rbio->hash_list));
rbio 844 fs/btrfs/raid56.c WARN_ON(!bio_list_empty(&rbio->bio_list));
rbio 846 fs/btrfs/raid56.c for (i = 0; i < rbio->nr_pages; i++) {
rbio 847 fs/btrfs/raid56.c if (rbio->stripe_pages[i]) {
rbio 848 fs/btrfs/raid56.c __free_page(rbio->stripe_pages[i]);
rbio 849 fs/btrfs/raid56.c rbio->stripe_pages[i] = NULL;
rbio 853 fs/btrfs/raid56.c btrfs_put_bbio(rbio->bbio);
rbio 854 fs/btrfs/raid56.c kfree(rbio);
rbio 874 fs/btrfs/raid56.c static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
rbio 876 fs/btrfs/raid56.c struct bio *cur = bio_list_get(&rbio->bio_list);
rbio 879 fs/btrfs/raid56.c if (rbio->generic_bio_cnt)
rbio 880 fs/btrfs/raid56.c btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
rbio 890 fs/btrfs/raid56.c unlock_stripe(rbio);
rbio 891 fs/btrfs/raid56.c extra = bio_list_get(&rbio->bio_list);
rbio 892 fs/btrfs/raid56.c __free_raid_bio(rbio);
rbio 905 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio = bio->bi_private;
rbio 910 fs/btrfs/raid56.c fail_bio_stripe(rbio, bio);
rbio 914 fs/btrfs/raid56.c if (!atomic_dec_and_test(&rbio->stripes_pending))
rbio 920 fs/btrfs/raid56.c max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
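The write end-io entries above follow a common completion pattern: every finishing bio decrements stripes_pending, and only the last one compares the accumulated error count against the tolerated maximum (zero for a parity scrub). A sketch of that pattern with C11 atomics standing in for the kernel's atomic_t; the threshold values are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int stripes_pending;
static atomic_int errors;

/* returns true when this call was the final completion */
static bool one_bio_done(bool failed, int max_errors)
{
	if (failed)
		atomic_fetch_add(&errors, 1);

	/* atomic_dec_and_test() analogue: fetch_sub returns the old value */
	if (atomic_fetch_sub(&stripes_pending, 1) != 1)
		return false;

	if (atomic_load(&errors) > max_errors)
		printf("I/O error: too many failed stripes\n");
	else
		printf("write completed OK\n");
	return true;
}

int main(void)
{
	atomic_store(&stripes_pending, 3);
	one_bio_done(false, 1);
	one_bio_done(true, 1);   /* one failure tolerated */
	one_bio_done(false, 1);  /* last completion: reports OK */
	return 0;
}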
rbio 921 fs/btrfs/raid56.c 0 : rbio->bbio->max_errors;
rbio 922 fs/btrfs/raid56.c if (atomic_read(&rbio->error) > max_errors)
rbio 925 fs/btrfs/raid56.c rbio_orig_end_io(rbio, err);
rbio 944 fs/btrfs/raid56.c static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
rbio 950 fs/btrfs/raid56.c chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
rbio 952 fs/btrfs/raid56.c spin_lock_irq(&rbio->bio_list_lock);
rbio 953 fs/btrfs/raid56.c p = rbio->bio_pages[chunk_page];
rbio 954 fs/btrfs/raid56.c spin_unlock_irq(&rbio->bio_list_lock);
rbio 959 fs/btrfs/raid56.c return rbio->stripe_pages[chunk_page];
rbio 979 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 986 fs/btrfs/raid56.c rbio = kzalloc(sizeof(*rbio) +
rbio 987 fs/btrfs/raid56.c sizeof(*rbio->stripe_pages) * num_pages +
rbio 988 fs/btrfs/raid56.c sizeof(*rbio->bio_pages) * num_pages +
rbio 989 fs/btrfs/raid56.c sizeof(*rbio->finish_pointers) * real_stripes +
rbio 990 fs/btrfs/raid56.c sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
rbio 991 fs/btrfs/raid56.c sizeof(*rbio->finish_pbitmap) *
rbio 994 fs/btrfs/raid56.c if (!rbio)
rbio 997 fs/btrfs/raid56.c bio_list_init(&rbio->bio_list);
rbio 998 fs/btrfs/raid56.c INIT_LIST_HEAD(&rbio->plug_list);
rbio 999 fs/btrfs/raid56.c spin_lock_init(&rbio->bio_list_lock);
rbio 1000 fs/btrfs/raid56.c INIT_LIST_HEAD(&rbio->stripe_cache);
rbio 1001 fs/btrfs/raid56.c INIT_LIST_HEAD(&rbio->hash_list);
rbio 1002 fs/btrfs/raid56.c rbio->bbio = bbio;
rbio 1003 fs/btrfs/raid56.c rbio->fs_info = fs_info;
rbio 1004 fs/btrfs/raid56.c rbio->stripe_len = stripe_len;
rbio 1005 fs/btrfs/raid56.c rbio->nr_pages = num_pages;
rbio 1006 fs/btrfs/raid56.c rbio->real_stripes = real_stripes;
rbio 1007 fs/btrfs/raid56.c rbio->stripe_npages = stripe_npages;
rbio 1008 fs/btrfs/raid56.c rbio->faila = -1;
rbio 1009 fs/btrfs/raid56.c rbio->failb = -1;
rbio 1010 fs/btrfs/raid56.c refcount_set(&rbio->refs, 1);
rbio 1011 fs/btrfs/raid56.c atomic_set(&rbio->error, 0);
rbio 1012 fs/btrfs/raid56.c atomic_set(&rbio->stripes_pending, 0);
rbio 1018 fs/btrfs/raid56.c p = rbio + 1;
rbio 1023 fs/btrfs/raid56.c CONSUME_ALLOC(rbio->stripe_pages, num_pages);
rbio 1024 fs/btrfs/raid56.c CONSUME_ALLOC(rbio->bio_pages, num_pages);
rbio 1025 fs/btrfs/raid56.c CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
rbio 1026 fs/btrfs/raid56.c CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
rbio 1027 fs/btrfs/raid56.c CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
rbio 1037 fs/btrfs/raid56.c rbio->nr_data = nr_data;
rbio 1038 fs/btrfs/raid56.c return rbio;
rbio 1042 fs/btrfs/raid56.c static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
rbio 1047 fs/btrfs/raid56.c for (i = 0; i < rbio->nr_pages; i++) {
rbio 1048 fs/btrfs/raid56.c if (rbio->stripe_pages[i])
rbio 1053 fs/btrfs/raid56.c rbio->stripe_pages[i] = page;
rbio 1059 fs/btrfs/raid56.c static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
rbio 1064 fs/btrfs/raid56.c i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
rbio 1066 fs/btrfs/raid56.c for (; i < rbio->nr_pages; i++) {
rbio 1067 fs/btrfs/raid56.c if (rbio->stripe_pages[i])
rbio 1072 fs/btrfs/raid56.c rbio->stripe_pages[i] = page;
rbio 1082 fs/btrfs/raid56.c static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
rbio 1096 fs/btrfs/raid56.c stripe = &rbio->bbio->stripes[stripe_nr];
rbio 1101 fs/btrfs/raid56.c return fail_rbio_index(rbio, stripe_nr);
rbio 1140 fs/btrfs/raid56.c static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
rbio 1142 fs/btrfs/raid56.c if (rbio->faila >= 0 || rbio->failb >= 0) {
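The alloc_rbio() entries above show a single kzalloc() covering the struct plus all of its trailing arrays, with a CONSUME_ALLOC macro carving pointers out of the tail starting at rbio + 1. A userspace sketch of that layout trick, using a cut-down stand-in struct rather than the kernel definition:

#include <stdlib.h>
#include <stdio.h>

struct mini_rbio {
	void **stripe_pages;
	void **bio_pages;
	void **finish_pointers;
	int nr_pages;
	int real_stripes;
};

#define CONSUME_ALLOC(ptr, count) do {				\
	ptr = (void *)p;					\
	p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
} while (0)

static struct mini_rbio *alloc_mini_rbio(int num_pages, int real_stripes)
{
	struct mini_rbio *rbio;
	void *p;

	rbio = calloc(1, sizeof(*rbio) +
			 sizeof(*rbio->stripe_pages) * num_pages +
			 sizeof(*rbio->bio_pages) * num_pages +
			 sizeof(*rbio->finish_pointers) * real_stripes);
	if (!rbio)
		return NULL;

	p = rbio + 1;		/* arrays start right after the struct */
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	return rbio;
}

int main(void)
{
	struct mini_rbio *rbio = alloc_mini_rbio(64, 4);

	printf("one allocation, one free: %p\n", (void *)rbio);
	free(rbio);		/* tears down struct and arrays at once */
	return 0;
}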
rbio 1143 fs/btrfs/raid56.c BUG_ON(rbio->faila == rbio->real_stripes - 1);
rbio 1144 fs/btrfs/raid56.c __raid56_parity_recover(rbio);
rbio 1146 fs/btrfs/raid56.c finish_rmw(rbio);
rbio 1158 fs/btrfs/raid56.c static void index_rbio_pages(struct btrfs_raid_bio *rbio)
rbio 1165 fs/btrfs/raid56.c spin_lock_irq(&rbio->bio_list_lock);
rbio 1166 fs/btrfs/raid56.c bio_list_for_each(bio, &rbio->bio_list) {
rbio 1172 fs/btrfs/raid56.c stripe_offset = start - rbio->bbio->raid_map[0];
rbio 1179 fs/btrfs/raid56.c rbio->bio_pages[page_index + i] = bvec.bv_page;
rbio 1183 fs/btrfs/raid56.c spin_unlock_irq(&rbio->bio_list_lock);
rbio 1194 fs/btrfs/raid56.c static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
rbio 1196 fs/btrfs/raid56.c struct btrfs_bio *bbio = rbio->bbio;
rbio 1197 fs/btrfs/raid56.c void **pointers = rbio->finish_pointers;
rbio 1198 fs/btrfs/raid56.c int nr_data = rbio->nr_data;
rbio 1209 fs/btrfs/raid56.c if (rbio->real_stripes - rbio->nr_data == 1) {
rbio 1210 fs/btrfs/raid56.c p_stripe = rbio->real_stripes - 1;
rbio 1211 fs/btrfs/raid56.c } else if (rbio->real_stripes - rbio->nr_data == 2) {
rbio 1212 fs/btrfs/raid56.c p_stripe = rbio->real_stripes - 2;
rbio 1213 fs/btrfs/raid56.c q_stripe = rbio->real_stripes - 1;
rbio 1226 fs/btrfs/raid56.c spin_lock_irq(&rbio->bio_list_lock);
rbio 1227 fs/btrfs/raid56.c set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
rbio 1228 fs/btrfs/raid56.c spin_unlock_irq(&rbio->bio_list_lock);
rbio 1230 fs/btrfs/raid56.c atomic_set(&rbio->error, 0);
rbio 1241 fs/btrfs/raid56.c index_rbio_pages(rbio);
rbio 1242 fs/btrfs/raid56.c if (!rbio_is_full(rbio))
rbio 1243 fs/btrfs/raid56.c cache_rbio_pages(rbio);
rbio 1245 fs/btrfs/raid56.c clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
rbio 1247 fs/btrfs/raid56.c for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
rbio 1251 fs/btrfs/raid56.c p = page_in_rbio(rbio, stripe, pagenr, 0);
rbio 1256 fs/btrfs/raid56.c p = rbio_pstripe_page(rbio, pagenr);
rbio 1266 fs/btrfs/raid56.c p = rbio_qstripe_page(rbio, pagenr);
rbio 1270 fs/btrfs/raid56.c raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
rbio 1279 fs/btrfs/raid56.c for (stripe = 0; stripe < rbio->real_stripes; stripe++)
rbio 1280 fs/btrfs/raid56.c kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
rbio 1288 fs/btrfs/raid56.c for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
rbio 1289 fs/btrfs/raid56.c for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
rbio 1291 fs/btrfs/raid56.c if (stripe < rbio->nr_data) {
rbio 1292 fs/btrfs/raid56.c page = page_in_rbio(rbio, stripe, pagenr, 1);
rbio 1296 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, stripe, pagenr);
rbio 1299 fs/btrfs/raid56.c ret = rbio_add_io_page(rbio, &bio_list,
rbio 1300 fs/btrfs/raid56.c page, stripe, pagenr, rbio->stripe_len);
rbio 1309 fs/btrfs/raid56.c for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
rbio 1313 fs/btrfs/raid56.c for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
rbio 1315 fs/btrfs/raid56.c if (stripe < rbio->nr_data) {
rbio 1316 fs/btrfs/raid56.c page = page_in_rbio(rbio, stripe, pagenr, 1);
rbio 1320 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, stripe, pagenr);
rbio 1323 fs/btrfs/raid56.c ret = rbio_add_io_page(rbio, &bio_list, page,
rbio 1324 fs/btrfs/raid56.c rbio->bbio->tgtdev_map[stripe],
rbio 1325 fs/btrfs/raid56.c pagenr, rbio->stripe_len);
rbio 1332 fs/btrfs/raid56.c atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
rbio 1333 fs/btrfs/raid56.c BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
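The finish_rmw() entries above compute parity page by page: the RAID5 case XORs the data pages into p, and the RAID6 case additionally runs raid6_call.gen_syndrome() for q. A sketch of only the RAID5 XOR half (a run_xor analogue); buffer sizes are illustrative stand-ins for PAGE_SIZE:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define STRIPE_BYTES 4096	/* stand-in for PAGE_SIZE */

/* XOR parity over nr_data source buffers into dest */
static void gen_p(unsigned char *dest,
		  unsigned char *const src[], int nr_data)
{
	memcpy(dest, src[0], STRIPE_BYTES);
	for (int s = 1; s < nr_data; s++)
		for (size_t i = 0; i < STRIPE_BYTES; i++)
			dest[i] ^= src[s][i];
}

int main(void)
{
	static unsigned char d0[STRIPE_BYTES], d1[STRIPE_BYTES], p[STRIPE_BYTES];
	unsigned char *const data[] = { d0, d1 };

	d0[0] = 0xa5;
	d1[0] = 0x0f;
	gen_p(p, data, 2);
	printf("p[0] = 0x%02x\n", p[0]);	/* 0xaa */
	return 0;
}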
rbio 1340 fs/btrfs/raid56.c bio->bi_private = rbio;
rbio 1349 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_IOERR);
rbio 1360 fs/btrfs/raid56.c static int find_bio_stripe(struct btrfs_raid_bio *rbio,
rbio 1370 fs/btrfs/raid56.c for (i = 0; i < rbio->bbio->num_stripes; i++) {
rbio 1371 fs/btrfs/raid56.c stripe = &rbio->bbio->stripes[i];
rbio 1374 fs/btrfs/raid56.c physical < stripe_start + rbio->stripe_len &&
rbio 1389 fs/btrfs/raid56.c static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
rbio 1398 fs/btrfs/raid56.c for (i = 0; i < rbio->nr_data; i++) {
rbio 1399 fs/btrfs/raid56.c stripe_start = rbio->bbio->raid_map[i];
rbio 1401 fs/btrfs/raid56.c logical < stripe_start + rbio->stripe_len) {
rbio 1411 fs/btrfs/raid56.c static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
rbio 1416 fs/btrfs/raid56.c spin_lock_irqsave(&rbio->bio_list_lock, flags);
rbio 1419 fs/btrfs/raid56.c if (rbio->faila == failed || rbio->failb == failed)
rbio 1422 fs/btrfs/raid56.c if (rbio->faila == -1) {
rbio 1424 fs/btrfs/raid56.c rbio->faila = failed;
rbio 1425 fs/btrfs/raid56.c atomic_inc(&rbio->error);
rbio 1426 fs/btrfs/raid56.c } else if (rbio->failb == -1) {
rbio 1428 fs/btrfs/raid56.c rbio->failb = failed;
rbio 1429 fs/btrfs/raid56.c atomic_inc(&rbio->error);
rbio 1434 fs/btrfs/raid56.c spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
rbio 1443 fs/btrfs/raid56.c static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
rbio 1446 fs/btrfs/raid56.c int failed = find_bio_stripe(rbio, bio);
rbio 1451 fs/btrfs/raid56.c return fail_rbio_index(rbio, failed);
rbio 1479 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio = bio->bi_private;
rbio 1482 fs/btrfs/raid56.c fail_bio_stripe(rbio, bio);
rbio 1488 fs/btrfs/raid56.c if (!atomic_dec_and_test(&rbio->stripes_pending))
rbio 1491 fs/btrfs/raid56.c if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
rbio 1499 fs/btrfs/raid56.c validate_rbio_for_rmw(rbio);
rbio 1504 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_IOERR);
rbio 1511 fs/btrfs/raid56.c static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
rbio 1522 fs/btrfs/raid56.c ret = alloc_rbio_pages(rbio);
rbio 1526 fs/btrfs/raid56.c index_rbio_pages(rbio);
rbio 1528 fs/btrfs/raid56.c atomic_set(&rbio->error, 0);
rbio 1533 fs/btrfs/raid56.c for (stripe = 0; stripe < rbio->nr_data; stripe++) {
rbio 1534 fs/btrfs/raid56.c for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
rbio 1542 fs/btrfs/raid56.c page = page_in_rbio(rbio, stripe, pagenr, 1);
rbio 1546 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, stripe, pagenr);
rbio 1554 fs/btrfs/raid56.c ret = rbio_add_io_page(rbio, &bio_list, page,
rbio 1555 fs/btrfs/raid56.c stripe, pagenr, rbio->stripe_len);
rbio 1576 fs/btrfs/raid56.c atomic_set(&rbio->stripes_pending, bios_to_read);
rbio 1582 fs/btrfs/raid56.c bio->bi_private = rbio;
rbio 1586 fs/btrfs/raid56.c btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
rbio 1594 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_IOERR);
rbio 1602 fs/btrfs/raid56.c validate_rbio_for_rmw(rbio);
rbio 1610 fs/btrfs/raid56.c static int full_stripe_write(struct btrfs_raid_bio *rbio)
rbio 1614 fs/btrfs/raid56.c ret = alloc_rbio_parity_pages(rbio);
rbio 1616 fs/btrfs/raid56.c __free_raid_bio(rbio);
rbio 1620 fs/btrfs/raid56.c ret = lock_stripe_add(rbio);
rbio 1622 fs/btrfs/raid56.c finish_rmw(rbio);
rbio 1631 fs/btrfs/raid56.c static int partial_stripe_write(struct btrfs_raid_bio *rbio)
rbio 1635 fs/btrfs/raid56.c ret = lock_stripe_add(rbio);
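The find_logical_bio_stripe() entries above do a linear scan of the per-stripe start addresses in raid_map[] to decide which data stripe covers a given logical address. A userspace sketch of that search; the addresses and stripe length in main() are illustrative:

#include <stdint.h>
#include <stdio.h>

static int find_logical_stripe(const uint64_t raid_map[], int nr_data,
			       uint64_t stripe_len, uint64_t logical)
{
	for (int i = 0; i < nr_data; i++) {
		uint64_t stripe_start = raid_map[i];

		if (logical >= stripe_start &&
		    logical < stripe_start + stripe_len)
			return i;
	}
	return -1;	/* not inside any data stripe */
}

int main(void)
{
	const uint64_t raid_map[] = { 0, 65536 };	/* two data stripes */

	printf("stripe = %d\n",
	       find_logical_stripe(raid_map, 2, 65536, 70000));	/* 1 */
	return 0;
}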
rbio 1637 fs/btrfs/raid56.c start_async_work(rbio, rmw_work);
rbio 1647 fs/btrfs/raid56.c static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
rbio 1650 fs/btrfs/raid56.c if (!rbio_is_full(rbio))
rbio 1651 fs/btrfs/raid56.c return partial_stripe_write(rbio);
rbio 1652 fs/btrfs/raid56.c return full_stripe_write(rbio);
rbio 1760 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 1765 fs/btrfs/raid56.c rbio = alloc_rbio(fs_info, bbio, stripe_len);
rbio 1766 fs/btrfs/raid56.c if (IS_ERR(rbio)) {
rbio 1768 fs/btrfs/raid56.c return PTR_ERR(rbio);
rbio 1770 fs/btrfs/raid56.c bio_list_add(&rbio->bio_list, bio);
rbio 1771 fs/btrfs/raid56.c rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio 1772 fs/btrfs/raid56.c rbio->operation = BTRFS_RBIO_WRITE;
rbio 1775 fs/btrfs/raid56.c rbio->generic_bio_cnt = 1;
rbio 1781 fs/btrfs/raid56.c if (rbio_is_full(rbio)) {
rbio 1782 fs/btrfs/raid56.c ret = full_stripe_write(rbio);
rbio 1795 fs/btrfs/raid56.c list_add_tail(&rbio->plug_list, &plug->rbio_list);
rbio 1798 fs/btrfs/raid56.c ret = __raid56_parity_write(rbio);
rbio 1810 fs/btrfs/raid56.c static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
rbio 1819 fs/btrfs/raid56.c pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
rbio 1825 fs/btrfs/raid56.c faila = rbio->faila;
rbio 1826 fs/btrfs/raid56.c failb = rbio->failb;
rbio 1828 fs/btrfs/raid56.c if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio 1829 fs/btrfs/raid56.c rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
rbio 1830 fs/btrfs/raid56.c spin_lock_irq(&rbio->bio_list_lock);
rbio 1831 fs/btrfs/raid56.c set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
rbio 1832 fs/btrfs/raid56.c spin_unlock_irq(&rbio->bio_list_lock);
rbio 1835 fs/btrfs/raid56.c index_rbio_pages(rbio);
rbio 1837 fs/btrfs/raid56.c for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
rbio 1842 fs/btrfs/raid56.c if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
rbio 1843 fs/btrfs/raid56.c !test_bit(pagenr, rbio->dbitmap))
rbio 1849 fs/btrfs/raid56.c for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
rbio 1854 fs/btrfs/raid56.c if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio 1855 fs/btrfs/raid56.c rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
rbio 1857 fs/btrfs/raid56.c page = page_in_rbio(rbio, stripe, pagenr, 0);
rbio 1859 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, stripe, pagenr);
rbio 1865 fs/btrfs/raid56.c if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
rbio 1871 fs/btrfs/raid56.c if (faila == rbio->nr_data) {
rbio 1900 fs/btrfs/raid56.c if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
rbio 1901 fs/btrfs/raid56.c if (rbio->bbio->raid_map[faila] ==
rbio 1913 fs/btrfs/raid56.c if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
rbio 1914 fs/btrfs/raid56.c raid6_datap_recov(rbio->real_stripes,
rbio 1917 fs/btrfs/raid56.c raid6_2data_recov(rbio->real_stripes,
rbio 1928 fs/btrfs/raid56.c copy_page(pointers[faila], pointers[rbio->nr_data]);
rbio 1932 fs/btrfs/raid56.c for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
rbio 1934 fs/btrfs/raid56.c pointers[rbio->nr_data - 1] = p;
rbio 1937 fs/btrfs/raid56.c run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
rbio 1945 fs/btrfs/raid56.c if (rbio->operation == BTRFS_RBIO_WRITE) {
rbio 1946 fs/btrfs/raid56.c for (i = 0; i < rbio->stripe_npages; i++) {
rbio 1948 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, faila, i);
rbio 1952 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, failb, i);
rbio 1957 fs/btrfs/raid56.c for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
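The RAID5 branch of __raid_recover_end_io() above (the copy_page/run_xor entries) rebuilds a single missing data stripe by XOR-ing the parity page with every surviving data page. A userspace sketch of that reconstruction; sizes and contents are illustrative:

#include <string.h>
#include <stdio.h>

#define STRIPE_BYTES 4096

static void rebuild(unsigned char *out, unsigned char *const stripes[],
		    int nr_stripes, int failed)
{
	memset(out, 0, STRIPE_BYTES);
	for (int s = 0; s < nr_stripes; s++) {
		if (s == failed)
			continue;	/* skip the lost stripe */
		for (int i = 0; i < STRIPE_BYTES; i++)
			out[i] ^= stripes[s][i];
	}
}

int main(void)
{
	static unsigned char d0[STRIPE_BYTES], d1[STRIPE_BYTES],
			     p[STRIPE_BYTES], out[STRIPE_BYTES];
	unsigned char *const stripes[] = { d0, d1, p };	/* data, data, parity */

	d0[0] = 0x11;
	d1[0] = 0x22;
	p[0] = d0[0] ^ d1[0];
	rebuild(out, stripes, 3, 1);	/* lose d1, rebuild it */
	printf("recovered 0x%02x (expected 0x22)\n", out[0]);
	return 0;
}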
rbio 1962 fs/btrfs/raid56.c if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio 1963 fs/btrfs/raid56.c rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
rbio 1965 fs/btrfs/raid56.c page = page_in_rbio(rbio, stripe, pagenr, 0);
rbio 1967 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, stripe, pagenr);
rbio 1983 fs/btrfs/raid56.c if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio 1984 fs/btrfs/raid56.c rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
rbio 2000 fs/btrfs/raid56.c if (err == BLK_STS_OK && rbio->failb < 0)
rbio 2001 fs/btrfs/raid56.c cache_rbio_pages(rbio);
rbio 2003 fs/btrfs/raid56.c clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
rbio 2005 fs/btrfs/raid56.c rbio_orig_end_io(rbio, err);
rbio 2007 fs/btrfs/raid56.c rbio->faila = -1;
rbio 2008 fs/btrfs/raid56.c rbio->failb = -1;
rbio 2010 fs/btrfs/raid56.c if (rbio->operation == BTRFS_RBIO_WRITE)
rbio 2011 fs/btrfs/raid56.c finish_rmw(rbio);
rbio 2012 fs/btrfs/raid56.c else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
rbio 2013 fs/btrfs/raid56.c finish_parity_scrub(rbio, 0);
rbio 2017 fs/btrfs/raid56.c rbio_orig_end_io(rbio, err);
rbio 2027 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio = bio->bi_private;
rbio 2034 fs/btrfs/raid56.c fail_bio_stripe(rbio, bio);
rbio 2039 fs/btrfs/raid56.c if (!atomic_dec_and_test(&rbio->stripes_pending))
rbio 2042 fs/btrfs/raid56.c if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
rbio 2043 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_IOERR);
rbio 2045 fs/btrfs/raid56.c __raid_recover_end_io(rbio);
rbio 2056 fs/btrfs/raid56.c static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
rbio 2067 fs/btrfs/raid56.c ret = alloc_rbio_pages(rbio);
rbio 2071 fs/btrfs/raid56.c atomic_set(&rbio->error, 0);
rbio 2078 fs/btrfs/raid56.c for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
rbio 2079 fs/btrfs/raid56.c if (rbio->faila == stripe || rbio->failb == stripe) {
rbio 2080 fs/btrfs/raid56.c atomic_inc(&rbio->error);
rbio 2084 fs/btrfs/raid56.c for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
rbio 2091 fs/btrfs/raid56.c p = rbio_stripe_page(rbio, stripe, pagenr);
rbio 2095 fs/btrfs/raid56.c ret = rbio_add_io_page(rbio, &bio_list,
rbio 2096 fs/btrfs/raid56.c rbio_stripe_page(rbio, stripe, pagenr),
rbio 2097 fs/btrfs/raid56.c stripe, pagenr, rbio->stripe_len);
rbio 2110 fs/btrfs/raid56.c if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
rbio 2111 fs/btrfs/raid56.c __raid_recover_end_io(rbio);
rbio 2122 fs/btrfs/raid56.c atomic_set(&rbio->stripes_pending, bios_to_read);
rbio 2128 fs/btrfs/raid56.c bio->bi_private = rbio;
rbio 2132 fs/btrfs/raid56.c btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
rbio 2140 fs/btrfs/raid56.c if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
rbio 2141 fs/btrfs/raid56.c rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
rbio 2142 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_IOERR);
rbio 2160 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 2168 fs/btrfs/raid56.c rbio = alloc_rbio(fs_info, bbio, stripe_len);
rbio 2169 fs/btrfs/raid56.c if (IS_ERR(rbio)) {
rbio 2172 fs/btrfs/raid56.c return PTR_ERR(rbio);
rbio 2175 fs/btrfs/raid56.c rbio->operation = BTRFS_RBIO_READ_REBUILD;
rbio 2176 fs/btrfs/raid56.c bio_list_add(&rbio->bio_list, bio);
rbio 2177 fs/btrfs/raid56.c rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio 2179 fs/btrfs/raid56.c rbio->faila = find_logical_bio_stripe(rbio, bio);
rbio 2180 fs/btrfs/raid56.c if (rbio->faila == -1) {
rbio 2187 fs/btrfs/raid56.c kfree(rbio);
rbio 2193 fs/btrfs/raid56.c rbio->generic_bio_cnt = 1;
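The raid56_parity_recover() entries above map a mirror number beyond the real copies onto an extra stripe to treat as failed: failb = real_stripes - (mirror_num - 1), stepping over the already-known faila. A sketch of just that mapping:

#include <stdio.h>

static int failb_for_mirror(int real_stripes, int mirror_num, int faila)
{
	int failb = real_stripes - (mirror_num - 1);

	if (failb <= faila)
		failb--;	/* don't collide with the known failure */
	return failb;
}

int main(void)
{
	/* 4 stripes (2 data + p + q): mirror 3 when faila is 2 */
	printf("failb = %d\n", failb_for_mirror(4, 3, 2));	/* 1 */
	return 0;
}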
rbio 2209 fs/btrfs/raid56.c rbio->failb = rbio->real_stripes - (mirror_num - 1);
rbio 2210 fs/btrfs/raid56.c ASSERT(rbio->failb > 0);
rbio 2211 fs/btrfs/raid56.c if (rbio->failb <= rbio->faila)
rbio 2212 fs/btrfs/raid56.c rbio->failb--;
rbio 2215 fs/btrfs/raid56.c ret = lock_stripe_add(rbio);
rbio 2225 fs/btrfs/raid56.c __raid56_parity_recover(rbio);
rbio 2237 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 2239 fs/btrfs/raid56.c rbio = container_of(work, struct btrfs_raid_bio, work);
rbio 2240 fs/btrfs/raid56.c raid56_rmw_stripe(rbio);
rbio 2245 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 2247 fs/btrfs/raid56.c rbio = container_of(work, struct btrfs_raid_bio, work);
rbio 2248 fs/btrfs/raid56.c __raid56_parity_recover(rbio);
rbio 2267 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 2270 fs/btrfs/raid56.c rbio = alloc_rbio(fs_info, bbio, stripe_len);
rbio 2271 fs/btrfs/raid56.c if (IS_ERR(rbio))
rbio 2273 fs/btrfs/raid56.c bio_list_add(&rbio->bio_list, bio);
rbio 2279 fs/btrfs/raid56.c rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
rbio 2286 fs/btrfs/raid56.c for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
rbio 2288 fs/btrfs/raid56.c rbio->scrubp = i;
rbio 2292 fs/btrfs/raid56.c ASSERT(i < rbio->real_stripes);
rbio 2296 fs/btrfs/raid56.c ASSERT(rbio->stripe_npages == stripe_nsectors);
rbio 2297 fs/btrfs/raid56.c bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
rbio 2303 fs/btrfs/raid56.c rbio->generic_bio_cnt = 1;
rbio 2305 fs/btrfs/raid56.c return rbio;
rbio 2309 fs/btrfs/raid56.c void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
rbio 2315 fs/btrfs/raid56.c ASSERT(logical >= rbio->bbio->raid_map[0]);
rbio 2316 fs/btrfs/raid56.c ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
rbio 2317 fs/btrfs/raid56.c rbio->stripe_len * rbio->nr_data);
rbio 2318 fs/btrfs/raid56.c stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
rbio 2320 fs/btrfs/raid56.c rbio->bio_pages[index] = page;
rbio 2327 fs/btrfs/raid56.c static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
rbio 2334 fs/btrfs/raid56.c for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
rbio 2335 fs/btrfs/raid56.c for (i = 0; i < rbio->real_stripes; i++) {
rbio 2336 fs/btrfs/raid56.c index = i * rbio->stripe_npages + bit;
rbio 2337 fs/btrfs/raid56.c if (rbio->stripe_pages[index])
rbio 2343 fs/btrfs/raid56.c rbio->stripe_pages[index] = page;
rbio 2349 fs/btrfs/raid56.c static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
rbio 2352 fs/btrfs/raid56.c struct btrfs_bio *bbio = rbio->bbio;
rbio 2353 fs/btrfs/raid56.c void **pointers = rbio->finish_pointers;
rbio 2354 fs/btrfs/raid56.c unsigned long *pbitmap = rbio->finish_pbitmap;
rbio 2355 fs/btrfs/raid56.c int nr_data = rbio->nr_data;
rbio 2369 fs/btrfs/raid56.c if (rbio->real_stripes - rbio->nr_data == 1) {
rbio 2370 fs/btrfs/raid56.c p_stripe = rbio->real_stripes - 1;
rbio 2371 fs/btrfs/raid56.c } else if (rbio->real_stripes - rbio->nr_data == 2) {
rbio 2372 fs/btrfs/raid56.c p_stripe = rbio->real_stripes - 2;
rbio 2373 fs/btrfs/raid56.c q_stripe = rbio->real_stripes - 1;
rbio 2378 fs/btrfs/raid56.c if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
rbio 2380 fs/btrfs/raid56.c bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
rbio 2388 fs/btrfs/raid56.c clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
rbio 2407 fs/btrfs/raid56.c atomic_set(&rbio->error, 0);
rbio 2409 fs/btrfs/raid56.c for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
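The raid56_add_scrub_pages() entries above bound-check a page's logical address against the stripe's data area and file it into the flat bio_pages array by its offset from raid_map[0]. A sketch of that index math; the byte-offset-to-page-index shift is an assumption here, as the listing does not show how index is derived from stripe_offset:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1u << PAGE_SHIFT)

static int scrub_page_index(uint64_t logical, uint64_t raid_map0,
			    uint64_t stripe_len, int nr_data)
{
	uint64_t stripe_offset;

	/* mirrors the ASSERTs in raid56_add_scrub_pages() */
	assert(logical >= raid_map0);
	assert(logical + PAGE_SIZE <= raid_map0 + stripe_len * nr_data);
	stripe_offset = logical - raid_map0;
	return (int)(stripe_offset >> PAGE_SHIFT);	/* assumed derivation */
}

int main(void)
{
	/* third page of a stripe starting at 1 MiB (values illustrative) */
	printf("index = %d\n",
	       scrub_page_index((1u << 20) + 2 * PAGE_SIZE, 1u << 20, 65536, 2));
	return 0;
}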
rbio 2414 fs/btrfs/raid56.c p = page_in_rbio(rbio, stripe, pagenr, 0);
rbio 2429 fs/btrfs/raid56.c raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
rbio 2438 fs/btrfs/raid56.c p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
rbio 2440 fs/btrfs/raid56.c if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
rbio 2441 fs/btrfs/raid56.c copy_page(parity, pointers[rbio->scrubp]);
rbio 2444 fs/btrfs/raid56.c bitmap_clear(rbio->dbitmap, pagenr, 1);
rbio 2448 fs/btrfs/raid56.c kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
rbio 2462 fs/btrfs/raid56.c for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
rbio 2465 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
rbio 2466 fs/btrfs/raid56.c ret = rbio_add_io_page(rbio, &bio_list,
rbio 2467 fs/btrfs/raid56.c page, rbio->scrubp, pagenr, rbio->stripe_len);
rbio 2475 fs/btrfs/raid56.c for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
rbio 2478 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
rbio 2479 fs/btrfs/raid56.c ret = rbio_add_io_page(rbio, &bio_list, page,
rbio 2480 fs/btrfs/raid56.c bbio->tgtdev_map[rbio->scrubp],
rbio 2481 fs/btrfs/raid56.c pagenr, rbio->stripe_len);
rbio 2490 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_OK);
rbio 2494 fs/btrfs/raid56.c atomic_set(&rbio->stripes_pending, nr_data);
rbio 2501 fs/btrfs/raid56.c bio->bi_private = rbio;
rbio 2510 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_IOERR);
rbio 2516 fs/btrfs/raid56.c static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
rbio 2518 fs/btrfs/raid56.c if (stripe >= 0 && stripe < rbio->nr_data)
rbio 2530 fs/btrfs/raid56.c static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
rbio 2532 fs/btrfs/raid56.c if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
rbio 2535 fs/btrfs/raid56.c if (rbio->faila >= 0 || rbio->failb >= 0) {
rbio 2538 fs/btrfs/raid56.c if (is_data_stripe(rbio, rbio->faila))
rbio 2540 fs/btrfs/raid56.c else if (is_parity_stripe(rbio->faila))
rbio 2541 fs/btrfs/raid56.c failp = rbio->faila;
rbio 2543 fs/btrfs/raid56.c if (is_data_stripe(rbio, rbio->failb))
rbio 2545 fs/btrfs/raid56.c else if (is_parity_stripe(rbio->failb))
rbio 2546 fs/btrfs/raid56.c failp = rbio->failb;
rbio 2553 fs/btrfs/raid56.c if (dfail > rbio->bbio->max_errors - 1)
rbio 2561 fs/btrfs/raid56.c finish_parity_scrub(rbio, 0);
rbio 2571 fs/btrfs/raid56.c if (failp != rbio->scrubp)
rbio 2574 fs/btrfs/raid56.c __raid_recover_end_io(rbio);
rbio 2576 fs/btrfs/raid56.c finish_parity_scrub(rbio, 1);
rbio 2581 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_IOERR);
rbio 2594 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio = bio->bi_private;
rbio 2597 fs/btrfs/raid56.c fail_bio_stripe(rbio, bio);
rbio 2603 fs/btrfs/raid56.c if (!atomic_dec_and_test(&rbio->stripes_pending))
rbio 2611 fs/btrfs/raid56.c validate_rbio_for_parity_scrub(rbio);
rbio 2614 fs/btrfs/raid56.c static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
rbio 2625 fs/btrfs/raid56.c ret = alloc_rbio_essential_pages(rbio);
rbio 2629 fs/btrfs/raid56.c atomic_set(&rbio->error, 0);
rbio 2634 fs/btrfs/raid56.c for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
rbio 2635 fs/btrfs/raid56.c for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
rbio 2643 fs/btrfs/raid56.c page = page_in_rbio(rbio, stripe, pagenr, 1);
rbio 2647 fs/btrfs/raid56.c page = rbio_stripe_page(rbio, stripe, pagenr);
rbio 2655 fs/btrfs/raid56.c ret = rbio_add_io_page(rbio, &bio_list, page,
rbio 2656 fs/btrfs/raid56.c stripe, pagenr, rbio->stripe_len);
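The verify step of finish_parity_scrub() above recomputes parity per page, memcmp()s it against the copy read from disk, and clears the matching page's dbitmap bit so only mismatches are written back. A sketch of that compare-and-filter step, with a plain unsigned long standing in for dbitmap:

#include <string.h>
#include <stdio.h>

#define STRIPE_BYTES 4096

static void verify_page(int pagenr, const unsigned char *computed,
			unsigned char *on_disk, unsigned long *dbitmap)
{
	if (memcmp(on_disk, computed, STRIPE_BYTES) == 0) {
		*dbitmap &= ~(1ul << pagenr);	/* parity is good, skip it */
		return;
	}
	/* mismatch: take the computed parity, leave the bit set */
	memcpy(on_disk, computed, STRIPE_BYTES);
}

int main(void)
{
	static unsigned char good[STRIPE_BYTES];
	static unsigned char disk0[STRIPE_BYTES], disk1[STRIPE_BYTES];
	unsigned long dbitmap = 0x3;	/* pages 0 and 1 to be checked */

	disk0[0] = 0xff;			/* page 0 is corrupt on "disk" */
	verify_page(0, good, disk0, &dbitmap);	/* stays set, gets rewritten */
	verify_page(1, good, disk1, &dbitmap);	/* matches, bit cleared */
	printf("dbitmap after scrub: 0x%lx\n", dbitmap);	/* 0x1 */
	return 0;
}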
rbio 2677 fs/btrfs/raid56.c atomic_set(&rbio->stripes_pending, bios_to_read);
rbio 2683 fs/btrfs/raid56.c bio->bi_private = rbio;
rbio 2687 fs/btrfs/raid56.c btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
rbio 2695 fs/btrfs/raid56.c rbio_orig_end_io(rbio, BLK_STS_IOERR);
rbio 2703 fs/btrfs/raid56.c validate_rbio_for_parity_scrub(rbio);
rbio 2708 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 2710 fs/btrfs/raid56.c rbio = container_of(work, struct btrfs_raid_bio, work);
rbio 2711 fs/btrfs/raid56.c raid56_parity_scrub_stripe(rbio);
rbio 2714 fs/btrfs/raid56.c void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
rbio 2716 fs/btrfs/raid56.c if (!lock_stripe_add(rbio))
rbio 2717 fs/btrfs/raid56.c start_async_work(rbio, scrub_parity_work);
rbio 2726 fs/btrfs/raid56.c struct btrfs_raid_bio *rbio;
rbio 2728 fs/btrfs/raid56.c rbio = alloc_rbio(fs_info, bbio, length);
rbio 2729 fs/btrfs/raid56.c if (IS_ERR(rbio))
rbio 2732 fs/btrfs/raid56.c rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
rbio 2733 fs/btrfs/raid56.c bio_list_add(&rbio->bio_list, bio);
rbio 2740 fs/btrfs/raid56.c rbio->faila = find_logical_bio_stripe(rbio, bio);
rbio 2741 fs/btrfs/raid56.c if (rbio->faila == -1) {
rbio 2743 fs/btrfs/raid56.c kfree(rbio);
rbio 2751 fs/btrfs/raid56.c rbio->generic_bio_cnt = 1;
rbio 2753 fs/btrfs/raid56.c return rbio;
rbio 2756 fs/btrfs/raid56.c void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
rbio 2758 fs/btrfs/raid56.c if (!lock_stripe_add(rbio))
rbio 2759 fs/btrfs/raid56.c start_async_work(rbio, read_rebuild_work);
rbio 39 fs/btrfs/raid56.h void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
rbio 47 fs/btrfs/raid56.h void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
rbio 52 fs/btrfs/raid56.h void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);
rbio 2169 fs/btrfs/scrub.c struct btrfs_raid_bio *rbio;
rbio 2195 fs/btrfs/scrub.c rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
rbio 2196 fs/btrfs/scrub.c if (!rbio)
rbio 2202 fs/btrfs/scrub.c raid56_add_scrub_pages(rbio, spage->page, spage->logical);
rbio 2208 fs/btrfs/scrub.c raid56_submit_missing_rbio(rbio);
rbio 2753 fs/btrfs/scrub.c struct btrfs_raid_bio *rbio;
rbio 2775 fs/btrfs/scrub.c rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
rbio 2779 fs/btrfs/scrub.c if (!rbio)
rbio 2783 fs/btrfs/scrub.c raid56_parity_submit_scrub_rbio(rbio);
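The submit helpers that close the listing (raid56_parity_submit_scrub_rbio, raid56_submit_missing_rbio) share one pattern: try to take the stripe lock, and only the winner queues the work; a loser is parked behind the current holder and run when the stripe is unlocked. A toy userspace model of that pattern, where the lock table and work names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define NUM_BUCKETS 16

static bool stripe_locked[NUM_BUCKETS];

/* returns 0 when the caller acquired the lock (mirrors lock_stripe_add) */
static int lock_stripe_add(int bucket)
{
	if (stripe_locked[bucket])
		return 1;	/* busy: caller waits behind the holder */
	stripe_locked[bucket] = true;
	return 0;
}

static void start_async_work(const char *what, int bucket)
{
	printf("queued %s for bucket %d\n", what, bucket);
}

int main(void)
{
	if (!lock_stripe_add(7))
		start_async_work("scrub_parity_work", 7);	/* runs */
	if (!lock_stripe_add(7))
		start_async_work("read_rebuild_work", 7);	/* deferred */
	return 0;
}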