Lines matching refs: rbio (fs/btrfs/raid56.c)

179 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
180 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
183 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
184 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
185 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
186 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
187 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
188 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
189 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
191 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
193 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
254 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) in cache_rbio_pages() argument
261 ret = alloc_rbio_pages(rbio); in cache_rbio_pages()
265 for (i = 0; i < rbio->nr_pages; i++) { in cache_rbio_pages()
266 if (!rbio->bio_pages[i]) in cache_rbio_pages()
269 s = kmap(rbio->bio_pages[i]); in cache_rbio_pages()
270 d = kmap(rbio->stripe_pages[i]); in cache_rbio_pages()
274 kunmap(rbio->bio_pages[i]); in cache_rbio_pages()
275 kunmap(rbio->stripe_pages[i]); in cache_rbio_pages()
276 SetPageUptodate(rbio->stripe_pages[i]); in cache_rbio_pages()
278 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
284 static int rbio_bucket(struct btrfs_raid_bio *rbio) in rbio_bucket() argument
286 u64 num = rbio->bbio->raid_map[0]; in rbio_bucket()
347 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in __remove_rbio_from_cache() argument
349 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache()
357 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
360 table = rbio->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
372 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
374 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
375 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
388 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
389 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
390 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
391 atomic_dec(&rbio->refs); in __remove_rbio_from_cache()
392 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
397 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
401 __free_raid_bio(rbio); in __remove_rbio_from_cache()
407 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in remove_rbio_from_cache() argument
412 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
415 table = rbio->fs_info->stripe_hash_table; in remove_rbio_from_cache()
418 __remove_rbio_from_cache(rbio); in remove_rbio_from_cache()
429 struct btrfs_raid_bio *rbio; in btrfs_clear_rbio_cache() local
435 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
438 __remove_rbio_from_cache(rbio); in btrfs_clear_rbio_cache()
467 static void cache_rbio(struct btrfs_raid_bio *rbio) in cache_rbio() argument
472 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
475 table = rbio->fs_info->stripe_hash_table; in cache_rbio()
478 spin_lock(&rbio->bio_list_lock); in cache_rbio()
481 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
482 atomic_inc(&rbio->refs); in cache_rbio()
484 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
485 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
487 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
491 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
500 if (found != rbio) in cache_rbio()
535 static int __rbio_is_full(struct btrfs_raid_bio *rbio) in __rbio_is_full() argument
537 unsigned long size = rbio->bio_list_bytes; in __rbio_is_full()
540 if (size != rbio->nr_data * rbio->stripe_len) in __rbio_is_full()
543 BUG_ON(size > rbio->nr_data * rbio->stripe_len); in __rbio_is_full()
547 static int rbio_is_full(struct btrfs_raid_bio *rbio) in rbio_is_full() argument
552 spin_lock_irqsave(&rbio->bio_list_lock, flags); in rbio_is_full()
553 ret = __rbio_is_full(rbio); in rbio_is_full()
554 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in rbio_is_full()
611 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_pstripe_page() argument
613 index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; in rbio_pstripe_page()
614 return rbio->stripe_pages[index]; in rbio_pstripe_page()
621 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_qstripe_page() argument
623 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_page()
626 index += ((rbio->nr_data + 1) * rbio->stripe_len) >> in rbio_qstripe_page()
628 return rbio->stripe_pages[index]; in rbio_qstripe_page()
653 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) in lock_stripe_add() argument
655 int bucket = rbio_bucket(rbio); in lock_stripe_add()
656 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; in lock_stripe_add()
669 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { in lock_stripe_add()
680 steal_rbio(cur, rbio); in lock_stripe_add()
688 if (rbio_can_merge(cur, rbio)) { in lock_stripe_add()
689 merge_rbio(cur, rbio); in lock_stripe_add()
691 freeit = rbio; in lock_stripe_add()
707 if (rbio_can_merge(pending, rbio)) { in lock_stripe_add()
708 merge_rbio(pending, rbio); in lock_stripe_add()
710 freeit = rbio; in lock_stripe_add()
720 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
727 atomic_inc(&rbio->refs); in lock_stripe_add()
728 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
742 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) in unlock_stripe() argument
749 bucket = rbio_bucket(rbio); in unlock_stripe()
750 h = rbio->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
752 if (list_empty(&rbio->plug_list)) in unlock_stripe()
753 cache_rbio(rbio); in unlock_stripe()
756 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
758 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
764 if (list_empty(&rbio->plug_list) && in unlock_stripe()
765 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
767 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
768 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
772 list_del_init(&rbio->hash_list); in unlock_stripe()
773 atomic_dec(&rbio->refs); in unlock_stripe()
780 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
782 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
787 list_del_init(&rbio->plug_list); in unlock_stripe()
791 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
797 steal_rbio(rbio, next); in unlock_stripe()
800 steal_rbio(rbio, next); in unlock_stripe()
806 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
813 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
818 remove_rbio_from_cache(rbio); in unlock_stripe()
821 static void __free_raid_bio(struct btrfs_raid_bio *rbio) in __free_raid_bio() argument
825 WARN_ON(atomic_read(&rbio->refs) < 0); in __free_raid_bio()
826 if (!atomic_dec_and_test(&rbio->refs)) in __free_raid_bio()
829 WARN_ON(!list_empty(&rbio->stripe_cache)); in __free_raid_bio()
830 WARN_ON(!list_empty(&rbio->hash_list)); in __free_raid_bio()
831 WARN_ON(!bio_list_empty(&rbio->bio_list)); in __free_raid_bio()
833 for (i = 0; i < rbio->nr_pages; i++) { in __free_raid_bio()
834 if (rbio->stripe_pages[i]) { in __free_raid_bio()
835 __free_page(rbio->stripe_pages[i]); in __free_raid_bio()
836 rbio->stripe_pages[i] = NULL; in __free_raid_bio()
840 btrfs_put_bbio(rbio->bbio); in __free_raid_bio()
841 kfree(rbio); in __free_raid_bio()
844 static void free_raid_bio(struct btrfs_raid_bio *rbio) in free_raid_bio() argument
846 unlock_stripe(rbio); in free_raid_bio()
847 __free_raid_bio(rbio); in free_raid_bio()
854 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate) in rbio_orig_end_io() argument
856 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
859 if (rbio->generic_bio_cnt) in rbio_orig_end_io()
860 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); in rbio_orig_end_io()
862 free_raid_bio(rbio); in rbio_orig_end_io()
880 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io() local
883 fail_bio_stripe(rbio, bio); in raid_write_end_io()
887 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_end_io()
893 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_write_end_io()
896 rbio_orig_end_io(rbio, err, 0); in raid_write_end_io()
916 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, in page_in_rbio() argument
922 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; in page_in_rbio()
924 spin_lock_irq(&rbio->bio_list_lock); in page_in_rbio()
925 p = rbio->bio_pages[chunk_page]; in page_in_rbio()
926 spin_unlock_irq(&rbio->bio_list_lock); in page_in_rbio()
931 return rbio->stripe_pages[chunk_page]; in page_in_rbio()
951 struct btrfs_raid_bio *rbio; in alloc_rbio() local
958 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 + in alloc_rbio()
961 if (!rbio) in alloc_rbio()
964 bio_list_init(&rbio->bio_list); in alloc_rbio()
965 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
966 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
967 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
968 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
969 rbio->bbio = bbio; in alloc_rbio()
970 rbio->fs_info = root->fs_info; in alloc_rbio()
971 rbio->stripe_len = stripe_len; in alloc_rbio()
972 rbio->nr_pages = num_pages; in alloc_rbio()
973 rbio->real_stripes = real_stripes; in alloc_rbio()
974 rbio->stripe_npages = stripe_npages; in alloc_rbio()
975 rbio->faila = -1; in alloc_rbio()
976 rbio->failb = -1; in alloc_rbio()
977 atomic_set(&rbio->refs, 1); in alloc_rbio()
978 atomic_set(&rbio->error, 0); in alloc_rbio()
979 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
985 p = rbio + 1; in alloc_rbio()
986 rbio->stripe_pages = p; in alloc_rbio()
987 rbio->bio_pages = p + sizeof(struct page *) * num_pages; in alloc_rbio()
988 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; in alloc_rbio()
997 rbio->nr_data = nr_data; in alloc_rbio()
998 return rbio; in alloc_rbio()
1002 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_pages() argument
1007 for (i = 0; i < rbio->nr_pages; i++) { in alloc_rbio_pages()
1008 if (rbio->stripe_pages[i]) in alloc_rbio_pages()
1013 rbio->stripe_pages[i] = page; in alloc_rbio_pages()
1020 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_parity_pages() argument
1025 i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; in alloc_rbio_parity_pages()
1027 for (; i < rbio->nr_pages; i++) { in alloc_rbio_parity_pages()
1028 if (rbio->stripe_pages[i]) in alloc_rbio_parity_pages()
1033 rbio->stripe_pages[i] = page; in alloc_rbio_parity_pages()
1043 static int rbio_add_io_page(struct btrfs_raid_bio *rbio, in rbio_add_io_page() argument
1057 stripe = &rbio->bbio->stripes[stripe_nr]; in rbio_add_io_page()
1062 return fail_rbio_index(rbio, stripe_nr); in rbio_add_io_page()
1104 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) in validate_rbio_for_rmw() argument
1106 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_rmw()
1107 BUG_ON(rbio->faila == rbio->real_stripes - 1); in validate_rbio_for_rmw()
1108 __raid56_parity_recover(rbio); in validate_rbio_for_rmw()
1110 finish_rmw(rbio); in validate_rbio_for_rmw()
1118 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page) in rbio_stripe_page() argument
1121 index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT); in rbio_stripe_page()
1123 return rbio->stripe_pages[index]; in rbio_stripe_page()
1134 static void index_rbio_pages(struct btrfs_raid_bio *rbio) in index_rbio_pages() argument
1143 spin_lock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1144 bio_list_for_each(bio, &rbio->bio_list) { in index_rbio_pages()
1146 stripe_offset = start - rbio->bbio->raid_map[0]; in index_rbio_pages()
1151 rbio->bio_pages[page_index + i] = p; in index_rbio_pages()
1154 spin_unlock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1165 static noinline void finish_rmw(struct btrfs_raid_bio *rbio) in finish_rmw() argument
1167 struct btrfs_bio *bbio = rbio->bbio; in finish_rmw()
1168 void *pointers[rbio->real_stripes]; in finish_rmw()
1169 int stripe_len = rbio->stripe_len; in finish_rmw()
1170 int nr_data = rbio->nr_data; in finish_rmw()
1182 if (rbio->real_stripes - rbio->nr_data == 1) { in finish_rmw()
1183 p_stripe = rbio->real_stripes - 1; in finish_rmw()
1184 } else if (rbio->real_stripes - rbio->nr_data == 2) { in finish_rmw()
1185 p_stripe = rbio->real_stripes - 2; in finish_rmw()
1186 q_stripe = rbio->real_stripes - 1; in finish_rmw()
1199 spin_lock_irq(&rbio->bio_list_lock); in finish_rmw()
1200 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in finish_rmw()
1201 spin_unlock_irq(&rbio->bio_list_lock); in finish_rmw()
1203 atomic_set(&rbio->error, 0); in finish_rmw()
1214 index_rbio_pages(rbio); in finish_rmw()
1215 if (!rbio_is_full(rbio)) in finish_rmw()
1216 cache_rbio_pages(rbio); in finish_rmw()
1218 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_rmw()
1224 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_rmw()
1229 p = rbio_pstripe_page(rbio, pagenr); in finish_rmw()
1239 p = rbio_qstripe_page(rbio, pagenr); in finish_rmw()
1243 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_rmw()
1252 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_rmw()
1253 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_rmw()
1261 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1264 if (stripe < rbio->nr_data) { in finish_rmw()
1265 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1269 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1272 ret = rbio_add_io_page(rbio, &bio_list, in finish_rmw()
1273 page, stripe, pagenr, rbio->stripe_len); in finish_rmw()
1282 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1288 if (stripe < rbio->nr_data) { in finish_rmw()
1289 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1293 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1296 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_rmw()
1297 rbio->bbio->tgtdev_map[stripe], in finish_rmw()
1298 pagenr, rbio->stripe_len); in finish_rmw()
1305 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); in finish_rmw()
1306 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); in finish_rmw()
1313 bio->bi_private = rbio; in finish_rmw()
1321 rbio_orig_end_io(rbio, -EIO, 0); in finish_rmw()
1329 static int find_bio_stripe(struct btrfs_raid_bio *rbio, in find_bio_stripe() argument
1339 for (i = 0; i < rbio->bbio->num_stripes; i++) { in find_bio_stripe()
1340 stripe = &rbio->bbio->stripes[i]; in find_bio_stripe()
1343 physical < stripe_start + rbio->stripe_len && in find_bio_stripe()
1356 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, in find_logical_bio_stripe() argument
1365 for (i = 0; i < rbio->nr_data; i++) { in find_logical_bio_stripe()
1366 stripe_start = rbio->bbio->raid_map[i]; in find_logical_bio_stripe()
1368 logical < stripe_start + rbio->stripe_len) { in find_logical_bio_stripe()
1378 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) in fail_rbio_index() argument
1383 spin_lock_irqsave(&rbio->bio_list_lock, flags); in fail_rbio_index()
1386 if (rbio->faila == failed || rbio->failb == failed) in fail_rbio_index()
1389 if (rbio->faila == -1) { in fail_rbio_index()
1391 rbio->faila = failed; in fail_rbio_index()
1392 atomic_inc(&rbio->error); in fail_rbio_index()
1393 } else if (rbio->failb == -1) { in fail_rbio_index()
1395 rbio->failb = failed; in fail_rbio_index()
1396 atomic_inc(&rbio->error); in fail_rbio_index()
1401 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in fail_rbio_index()
1410 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, in fail_bio_stripe() argument
1413 int failed = find_bio_stripe(rbio, bio); in fail_bio_stripe()
1418 return fail_rbio_index(rbio, failed); in fail_bio_stripe()
1446 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_rmw_end_io() local
1449 fail_bio_stripe(rbio, bio); in raid_rmw_end_io()
1455 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_rmw_end_io()
1459 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_rmw_end_io()
1467 validate_rbio_for_rmw(rbio); in raid_rmw_end_io()
1472 rbio_orig_end_io(rbio, -EIO, 0); in raid_rmw_end_io()
1475 static void async_rmw_stripe(struct btrfs_raid_bio *rbio) in async_rmw_stripe() argument
1477 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_rmw_stripe()
1480 btrfs_queue_work(rbio->fs_info->rmw_workers, in async_rmw_stripe()
1481 &rbio->work); in async_rmw_stripe()
1484 static void async_read_rebuild(struct btrfs_raid_bio *rbio) in async_read_rebuild() argument
1486 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_read_rebuild()
1489 btrfs_queue_work(rbio->fs_info->rmw_workers, in async_read_rebuild()
1490 &rbio->work); in async_read_rebuild()
1497 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) in raid56_rmw_stripe() argument
1502 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); in raid56_rmw_stripe()
1509 ret = alloc_rbio_pages(rbio); in raid56_rmw_stripe()
1513 index_rbio_pages(rbio); in raid56_rmw_stripe()
1515 atomic_set(&rbio->error, 0); in raid56_rmw_stripe()
1520 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in raid56_rmw_stripe()
1529 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_rmw_stripe()
1533 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_rmw_stripe()
1541 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_rmw_stripe()
1542 stripe, pagenr, rbio->stripe_len); in raid56_rmw_stripe()
1563 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_rmw_stripe()
1569 bio->bi_private = rbio; in raid56_rmw_stripe()
1572 btrfs_bio_wq_end_io(rbio->fs_info, bio, in raid56_rmw_stripe()
1582 rbio_orig_end_io(rbio, -EIO, 0); in raid56_rmw_stripe()
1586 validate_rbio_for_rmw(rbio); in raid56_rmw_stripe()
1594 static int full_stripe_write(struct btrfs_raid_bio *rbio) in full_stripe_write() argument
1598 ret = alloc_rbio_parity_pages(rbio); in full_stripe_write()
1600 __free_raid_bio(rbio); in full_stripe_write()
1604 ret = lock_stripe_add(rbio); in full_stripe_write()
1606 finish_rmw(rbio); in full_stripe_write()
1615 static int partial_stripe_write(struct btrfs_raid_bio *rbio) in partial_stripe_write() argument
1619 ret = lock_stripe_add(rbio); in partial_stripe_write()
1621 async_rmw_stripe(rbio); in partial_stripe_write()
1631 static int __raid56_parity_write(struct btrfs_raid_bio *rbio) in __raid56_parity_write() argument
1634 if (!rbio_is_full(rbio)) in __raid56_parity_write()
1635 return partial_stripe_write(rbio); in __raid56_parity_write()
1636 return full_stripe_write(rbio); in __raid56_parity_write()
1742 struct btrfs_raid_bio *rbio; in raid56_parity_write() local
1747 rbio = alloc_rbio(root, bbio, stripe_len); in raid56_parity_write()
1748 if (IS_ERR(rbio)) { in raid56_parity_write()
1750 return PTR_ERR(rbio); in raid56_parity_write()
1752 bio_list_add(&rbio->bio_list, bio); in raid56_parity_write()
1753 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_write()
1754 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1757 rbio->generic_bio_cnt = 1; in raid56_parity_write()
1763 if (rbio_is_full(rbio)) { in raid56_parity_write()
1764 ret = full_stripe_write(rbio); in raid56_parity_write()
1778 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1781 ret = __raid56_parity_write(rbio); in raid56_parity_write()
1793 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) in __raid_recover_end_io() argument
1798 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); in __raid_recover_end_io()
1803 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1809 faila = rbio->faila; in __raid_recover_end_io()
1810 failb = rbio->failb; in __raid_recover_end_io()
1812 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in __raid_recover_end_io()
1813 spin_lock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1814 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in __raid_recover_end_io()
1815 spin_unlock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1818 index_rbio_pages(rbio); in __raid_recover_end_io()
1825 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in __raid_recover_end_io()
1826 !test_bit(pagenr, rbio->dbitmap)) in __raid_recover_end_io()
1832 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1837 if (rbio->operation == BTRFS_RBIO_READ_REBUILD && in __raid_recover_end_io()
1839 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1841 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1847 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1853 if (faila == rbio->nr_data) { in __raid_recover_end_io()
1882 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1883 if (rbio->bbio->raid_map[faila] == in __raid_recover_end_io()
1895 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
1896 raid6_datap_recov(rbio->real_stripes, in __raid_recover_end_io()
1899 raid6_2data_recov(rbio->real_stripes, in __raid_recover_end_io()
1911 pointers[rbio->nr_data], in __raid_recover_end_io()
1916 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
1918 pointers[rbio->nr_data - 1] = p; in __raid_recover_end_io()
1921 run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); in __raid_recover_end_io()
1929 if (rbio->operation == BTRFS_RBIO_WRITE) { in __raid_recover_end_io()
1932 page = rbio_stripe_page(rbio, faila, i); in __raid_recover_end_io()
1936 page = rbio_stripe_page(rbio, failb, i); in __raid_recover_end_io()
1941 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1946 if (rbio->operation == BTRFS_RBIO_READ_REBUILD && in __raid_recover_end_io()
1948 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1950 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1961 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in __raid_recover_end_io()
1963 cache_rbio_pages(rbio); in __raid_recover_end_io()
1965 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in __raid_recover_end_io()
1967 rbio_orig_end_io(rbio, err, err == 0); in __raid_recover_end_io()
1969 rbio->faila = -1; in __raid_recover_end_io()
1970 rbio->failb = -1; in __raid_recover_end_io()
1972 if (rbio->operation == BTRFS_RBIO_WRITE) in __raid_recover_end_io()
1973 finish_rmw(rbio); in __raid_recover_end_io()
1974 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) in __raid_recover_end_io()
1975 finish_parity_scrub(rbio, 0); in __raid_recover_end_io()
1979 rbio_orig_end_io(rbio, err, 0); in __raid_recover_end_io()
1989 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_recover_end_io() local
1996 fail_bio_stripe(rbio, bio); in raid_recover_end_io()
2001 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_recover_end_io()
2004 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_recover_end_io()
2005 rbio_orig_end_io(rbio, -EIO, 0); in raid_recover_end_io()
2007 __raid_recover_end_io(rbio); in raid_recover_end_io()
2018 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) in __raid56_parity_recover() argument
2023 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); in __raid56_parity_recover()
2030 ret = alloc_rbio_pages(rbio); in __raid56_parity_recover()
2034 atomic_set(&rbio->error, 0); in __raid56_parity_recover()
2041 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid56_parity_recover()
2042 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2043 atomic_inc(&rbio->error); in __raid56_parity_recover()
2054 p = rbio_stripe_page(rbio, stripe, pagenr); in __raid56_parity_recover()
2058 ret = rbio_add_io_page(rbio, &bio_list, in __raid56_parity_recover()
2059 rbio_stripe_page(rbio, stripe, pagenr), in __raid56_parity_recover()
2060 stripe, pagenr, rbio->stripe_len); in __raid56_parity_recover()
2073 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { in __raid56_parity_recover()
2074 __raid_recover_end_io(rbio); in __raid56_parity_recover()
2085 atomic_set(&rbio->stripes_pending, bios_to_read); in __raid56_parity_recover()
2091 bio->bi_private = rbio; in __raid56_parity_recover()
2094 btrfs_bio_wq_end_io(rbio->fs_info, bio, in __raid56_parity_recover()
2104 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) in __raid56_parity_recover()
2105 rbio_orig_end_io(rbio, -EIO, 0); in __raid56_parity_recover()
2119 struct btrfs_raid_bio *rbio; in raid56_parity_recover() local
2122 rbio = alloc_rbio(root, bbio, stripe_len); in raid56_parity_recover()
2123 if (IS_ERR(rbio)) { in raid56_parity_recover()
2126 return PTR_ERR(rbio); in raid56_parity_recover()
2129 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2130 bio_list_add(&rbio->bio_list, bio); in raid56_parity_recover()
2131 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_recover()
2133 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_parity_recover()
2134 if (rbio->faila == -1) { in raid56_parity_recover()
2138 kfree(rbio); in raid56_parity_recover()
2144 rbio->generic_bio_cnt = 1; in raid56_parity_recover()
2154 rbio->failb = rbio->real_stripes - 2; in raid56_parity_recover()
2156 ret = lock_stripe_add(rbio); in raid56_parity_recover()
2166 __raid56_parity_recover(rbio); in raid56_parity_recover()
2178 struct btrfs_raid_bio *rbio; in rmw_work() local
2180 rbio = container_of(work, struct btrfs_raid_bio, work); in rmw_work()
2181 raid56_rmw_stripe(rbio); in rmw_work()
2186 struct btrfs_raid_bio *rbio; in read_rebuild_work() local
2188 rbio = container_of(work, struct btrfs_raid_bio, work); in read_rebuild_work()
2189 __raid56_parity_recover(rbio); in read_rebuild_work()
2206 struct btrfs_raid_bio *rbio; in raid56_parity_alloc_scrub_rbio() local
2209 rbio = alloc_rbio(root, bbio, stripe_len); in raid56_parity_alloc_scrub_rbio()
2210 if (IS_ERR(rbio)) in raid56_parity_alloc_scrub_rbio()
2212 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2218 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2220 for (i = 0; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2222 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2229 ASSERT(rbio->stripe_npages == stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2230 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2232 return rbio; in raid56_parity_alloc_scrub_rbio()
2235 void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, in raid56_parity_add_scrub_pages() argument
2241 ASSERT(logical >= rbio->bbio->raid_map[0]); in raid56_parity_add_scrub_pages()
2242 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + in raid56_parity_add_scrub_pages()
2243 rbio->stripe_len * rbio->nr_data); in raid56_parity_add_scrub_pages()
2244 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); in raid56_parity_add_scrub_pages()
2246 rbio->bio_pages[index] = page; in raid56_parity_add_scrub_pages()
2253 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_essential_pages() argument
2260 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { in alloc_rbio_essential_pages()
2261 for (i = 0; i < rbio->real_stripes; i++) { in alloc_rbio_essential_pages()
2262 index = i * rbio->stripe_npages + bit; in alloc_rbio_essential_pages()
2263 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2269 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2282 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_parity_end_io() local
2285 fail_bio_stripe(rbio, bio); in raid_write_parity_end_io()
2289 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_parity_end_io()
2294 if (atomic_read(&rbio->error)) in raid_write_parity_end_io()
2297 rbio_orig_end_io(rbio, err, 0); in raid_write_parity_end_io()
2300 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, in finish_parity_scrub() argument
2303 struct btrfs_bio *bbio = rbio->bbio; in finish_parity_scrub()
2304 void *pointers[rbio->real_stripes]; in finish_parity_scrub()
2305 DECLARE_BITMAP(pbitmap, rbio->stripe_npages); in finish_parity_scrub()
2306 int nr_data = rbio->nr_data; in finish_parity_scrub()
2320 if (rbio->real_stripes - rbio->nr_data == 1) { in finish_parity_scrub()
2321 p_stripe = rbio->real_stripes - 1; in finish_parity_scrub()
2322 } else if (rbio->real_stripes - rbio->nr_data == 2) { in finish_parity_scrub()
2323 p_stripe = rbio->real_stripes - 2; in finish_parity_scrub()
2324 q_stripe = rbio->real_stripes - 1; in finish_parity_scrub()
2329 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2331 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); in finish_parity_scrub()
2339 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2358 atomic_set(&rbio->error, 0); in finish_parity_scrub()
2360 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2365 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_parity_scrub()
2380 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_parity_scrub()
2389 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2391 if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE)) in finish_parity_scrub()
2392 memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE); in finish_parity_scrub()
2395 bitmap_clear(rbio->dbitmap, pagenr, 1); in finish_parity_scrub()
2398 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_parity_scrub()
2399 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_parity_scrub()
2412 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2415 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2416 ret = rbio_add_io_page(rbio, &bio_list, in finish_parity_scrub()
2417 page, rbio->scrubp, pagenr, rbio->stripe_len); in finish_parity_scrub()
2425 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2428 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2429 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_parity_scrub()
2430 bbio->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2431 pagenr, rbio->stripe_len); in finish_parity_scrub()
2440 rbio_orig_end_io(rbio, 0, 0); in finish_parity_scrub()
2444 atomic_set(&rbio->stripes_pending, nr_data); in finish_parity_scrub()
2451 bio->bi_private = rbio; in finish_parity_scrub()
2459 rbio_orig_end_io(rbio, -EIO, 0); in finish_parity_scrub()
2462 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2464 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2476 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) in validate_rbio_for_parity_scrub() argument
2478 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in validate_rbio_for_parity_scrub()
2481 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_parity_scrub()
2484 if (is_data_stripe(rbio, rbio->faila)) in validate_rbio_for_parity_scrub()
2486 else if (is_parity_stripe(rbio->faila)) in validate_rbio_for_parity_scrub()
2487 failp = rbio->faila; in validate_rbio_for_parity_scrub()
2489 if (is_data_stripe(rbio, rbio->failb)) in validate_rbio_for_parity_scrub()
2491 else if (is_parity_stripe(rbio->failb)) in validate_rbio_for_parity_scrub()
2492 failp = rbio->failb; in validate_rbio_for_parity_scrub()
2499 if (dfail > rbio->bbio->max_errors - 1) in validate_rbio_for_parity_scrub()
2507 finish_parity_scrub(rbio, 0); in validate_rbio_for_parity_scrub()
2517 if (failp != rbio->scrubp) in validate_rbio_for_parity_scrub()
2520 __raid_recover_end_io(rbio); in validate_rbio_for_parity_scrub()
2522 finish_parity_scrub(rbio, 1); in validate_rbio_for_parity_scrub()
2527 rbio_orig_end_io(rbio, -EIO, 0); in validate_rbio_for_parity_scrub()
2540 struct btrfs_raid_bio *rbio = bio->bi_private; in raid56_parity_scrub_end_io() local
2543 fail_bio_stripe(rbio, bio); in raid56_parity_scrub_end_io()
2549 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid56_parity_scrub_end_io()
2557 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_end_io()
2560 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) in raid56_parity_scrub_stripe() argument
2569 ret = alloc_rbio_essential_pages(rbio); in raid56_parity_scrub_stripe()
2575 atomic_set(&rbio->error, 0); in raid56_parity_scrub_stripe()
2580 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in raid56_parity_scrub_stripe()
2581 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in raid56_parity_scrub_stripe()
2589 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_parity_scrub_stripe()
2593 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_parity_scrub_stripe()
2601 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_parity_scrub_stripe()
2602 stripe, pagenr, rbio->stripe_len); in raid56_parity_scrub_stripe()
2623 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_parity_scrub_stripe()
2629 bio->bi_private = rbio; in raid56_parity_scrub_stripe()
2632 btrfs_bio_wq_end_io(rbio->fs_info, bio, in raid56_parity_scrub_stripe()
2642 rbio_orig_end_io(rbio, -EIO, 0); in raid56_parity_scrub_stripe()
2646 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_stripe()
2651 struct btrfs_raid_bio *rbio; in scrub_parity_work() local
2653 rbio = container_of(work, struct btrfs_raid_bio, work); in scrub_parity_work()
2654 raid56_parity_scrub_stripe(rbio); in scrub_parity_work()
2657 static void async_scrub_parity(struct btrfs_raid_bio *rbio) in async_scrub_parity() argument
2659 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_scrub_parity()
2662 btrfs_queue_work(rbio->fs_info->rmw_workers, in async_scrub_parity()
2663 &rbio->work); in async_scrub_parity()
2666 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) in raid56_parity_submit_scrub_rbio() argument
2668 if (!lock_stripe_add(rbio)) in raid56_parity_submit_scrub_rbio()
2669 async_scrub_parity(rbio); in raid56_parity_submit_scrub_rbio()