Lines matching refs: rbio (fs/btrfs/raid56.c)

180 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
181 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
184 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
185 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
186 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
187 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
188 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
189 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
190 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
192 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
194 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
255 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) in cache_rbio_pages() argument
262 ret = alloc_rbio_pages(rbio); in cache_rbio_pages()
266 for (i = 0; i < rbio->nr_pages; i++) { in cache_rbio_pages()
267 if (!rbio->bio_pages[i]) in cache_rbio_pages()
270 s = kmap(rbio->bio_pages[i]); in cache_rbio_pages()
271 d = kmap(rbio->stripe_pages[i]); in cache_rbio_pages()
275 kunmap(rbio->bio_pages[i]); in cache_rbio_pages()
276 kunmap(rbio->stripe_pages[i]); in cache_rbio_pages()
277 SetPageUptodate(rbio->stripe_pages[i]); in cache_rbio_pages()
279 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
285 static int rbio_bucket(struct btrfs_raid_bio *rbio) in rbio_bucket() argument
287 u64 num = rbio->bbio->raid_map[0]; in rbio_bucket()
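
The bucket above is derived solely from rbio->bbio->raid_map[0], the logical start of the full stripe, so every rbio touching the same full stripe hashes to the same stripe_hash_table bucket and serializes on the same lock. Below is a minimal user-space sketch of that idea; the table size and the multiplicative hash are assumptions made only for the sketch (the kernel hashes with its own hash_64() helper), and none of this is the kernel code itself.

/*
 * Illustrative sketch, not fs/btrfs/raid56.c: hashing raid_map[0] is enough
 * to make every rbio for one full stripe collide into one bucket.
 */
#include <stdint.h>
#include <stdio.h>

#define STRIPE_HASH_BITS    12                  /* assumed: 4096 buckets */
#define STRIPE_HASH_BUCKETS (1U << STRIPE_HASH_BITS)

static unsigned int stripe_bucket(uint64_t raid_map0)
{
	/* byte addresses have mostly-zero low bits; shift them off first */
	uint64_t num = raid_map0 >> 16;

	/* Fibonacci hashing as a stand-in for the kernel's hash_64() */
	return (unsigned int)((num * 0x9E3779B97F4A7C15ULL) >> (64 - STRIPE_HASH_BITS));
}

int main(void)
{
	uint64_t full_stripe_start = 1ULL << 30;    /* hypothetical logical address */

	/* two rbios against the same full stripe -> same bucket, same lock */
	printf("bucket A = %u of %u\n", stripe_bucket(full_stripe_start), STRIPE_HASH_BUCKETS);
	printf("bucket B = %u of %u\n", stripe_bucket(full_stripe_start), STRIPE_HASH_BUCKETS);
	return 0;
}
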
348 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in __remove_rbio_from_cache() argument
350 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache()
358 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
361 table = rbio->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
373 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
375 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
376 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
389 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
390 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
391 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
392 atomic_dec(&rbio->refs); in __remove_rbio_from_cache()
393 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
398 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
402 __free_raid_bio(rbio); in __remove_rbio_from_cache()
408 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in remove_rbio_from_cache() argument
413 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
416 table = rbio->fs_info->stripe_hash_table; in remove_rbio_from_cache()
419 __remove_rbio_from_cache(rbio); in remove_rbio_from_cache()
430 struct btrfs_raid_bio *rbio; in btrfs_clear_rbio_cache() local
436 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
439 __remove_rbio_from_cache(rbio); in btrfs_clear_rbio_cache()
468 static void cache_rbio(struct btrfs_raid_bio *rbio) in cache_rbio() argument
473 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
476 table = rbio->fs_info->stripe_hash_table; in cache_rbio()
479 spin_lock(&rbio->bio_list_lock); in cache_rbio()
482 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
483 atomic_inc(&rbio->refs); in cache_rbio()
485 if (!list_empty(&rbio->stripe_cache)){ in cache_rbio()
486 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
488 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
492 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
501 if (found != rbio) in cache_rbio()
536 static int __rbio_is_full(struct btrfs_raid_bio *rbio) in __rbio_is_full() argument
538 unsigned long size = rbio->bio_list_bytes; in __rbio_is_full()
541 if (size != rbio->nr_data * rbio->stripe_len) in __rbio_is_full()
544 BUG_ON(size > rbio->nr_data * rbio->stripe_len); in __rbio_is_full()
548 static int rbio_is_full(struct btrfs_raid_bio *rbio) in rbio_is_full() argument
553 spin_lock_irqsave(&rbio->bio_list_lock, flags); in rbio_is_full()
554 ret = __rbio_is_full(rbio); in rbio_is_full()
555 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in rbio_is_full()
616 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_pstripe_page() argument
618 index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; in rbio_pstripe_page()
619 return rbio->stripe_pages[index]; in rbio_pstripe_page()
626 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) in rbio_qstripe_page() argument
628 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_page()
631 index += ((rbio->nr_data + 1) * rbio->stripe_len) >> in rbio_qstripe_page()
633 return rbio->stripe_pages[index]; in rbio_qstripe_page()
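
rbio_pstripe_page() and rbio_qstripe_page() above expose the flat layout of stripe_pages[]: the data stripes come first, then P, then Q (RAID6 only), each stripe contributing stripe_len / PAGE_SIZE slots. The following is a self-contained sketch of that index math with illustrative constants, not the kernel's; RAID5 is signalled with -1 where the kernel returns a NULL page.

#include <assert.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12

/* P pages start right after all of the data pages */
static int pstripe_page_index(int nr_data, int stripe_len, int index)
{
	return index + ((nr_data * stripe_len) >> TOY_PAGE_SHIFT);
}

/* Q pages start after the data pages and the P pages; RAID5 has no Q */
static int qstripe_page_index(int nr_data, int real_stripes, int stripe_len, int index)
{
	if (nr_data + 1 == real_stripes)
		return -1;
	return index + (((nr_data + 1) * stripe_len) >> TOY_PAGE_SHIFT);
}

int main(void)
{
	int nr_data = 3, real_stripes = 5, stripe_len = 64 * 1024; /* hypothetical RAID6 */

	printf("P page 0 -> slot %d\n", pstripe_page_index(nr_data, stripe_len, 0));              /* 48 */
	printf("Q page 0 -> slot %d\n", qstripe_page_index(nr_data, real_stripes, stripe_len, 0)); /* 64 */
	assert(qstripe_page_index(2, 3, stripe_len, 0) == -1);      /* RAID5: no Q stripe */
	return 0;
}
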
658 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) in lock_stripe_add() argument
660 int bucket = rbio_bucket(rbio); in lock_stripe_add()
661 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; in lock_stripe_add()
674 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { in lock_stripe_add()
685 steal_rbio(cur, rbio); in lock_stripe_add()
693 if (rbio_can_merge(cur, rbio)) { in lock_stripe_add()
694 merge_rbio(cur, rbio); in lock_stripe_add()
696 freeit = rbio; in lock_stripe_add()
712 if (rbio_can_merge(pending, rbio)) { in lock_stripe_add()
713 merge_rbio(pending, rbio); in lock_stripe_add()
715 freeit = rbio; in lock_stripe_add()
725 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
732 atomic_inc(&rbio->refs); in lock_stripe_add()
733 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
747 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) in unlock_stripe() argument
754 bucket = rbio_bucket(rbio); in unlock_stripe()
755 h = rbio->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
757 if (list_empty(&rbio->plug_list)) in unlock_stripe()
758 cache_rbio(rbio); in unlock_stripe()
761 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
763 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
769 if (list_empty(&rbio->plug_list) && in unlock_stripe()
770 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
772 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
773 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
777 list_del_init(&rbio->hash_list); in unlock_stripe()
778 atomic_dec(&rbio->refs); in unlock_stripe()
785 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
787 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
792 list_del_init(&rbio->plug_list); in unlock_stripe()
796 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
802 steal_rbio(rbio, next); in unlock_stripe()
805 steal_rbio(rbio, next); in unlock_stripe()
808 steal_rbio(rbio, next); in unlock_stripe()
818 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
825 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
830 remove_rbio_from_cache(rbio); in unlock_stripe()
833 static void __free_raid_bio(struct btrfs_raid_bio *rbio) in __free_raid_bio() argument
837 WARN_ON(atomic_read(&rbio->refs) < 0); in __free_raid_bio()
838 if (!atomic_dec_and_test(&rbio->refs)) in __free_raid_bio()
841 WARN_ON(!list_empty(&rbio->stripe_cache)); in __free_raid_bio()
842 WARN_ON(!list_empty(&rbio->hash_list)); in __free_raid_bio()
843 WARN_ON(!bio_list_empty(&rbio->bio_list)); in __free_raid_bio()
845 for (i = 0; i < rbio->nr_pages; i++) { in __free_raid_bio()
846 if (rbio->stripe_pages[i]) { in __free_raid_bio()
847 __free_page(rbio->stripe_pages[i]); in __free_raid_bio()
848 rbio->stripe_pages[i] = NULL; in __free_raid_bio()
852 btrfs_put_bbio(rbio->bbio); in __free_raid_bio()
853 kfree(rbio); in __free_raid_bio()
856 static void free_raid_bio(struct btrfs_raid_bio *rbio) in free_raid_bio() argument
858 unlock_stripe(rbio); in free_raid_bio()
859 __free_raid_bio(rbio); in free_raid_bio()
866 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) in rbio_orig_end_io() argument
868 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
871 if (rbio->generic_bio_cnt) in rbio_orig_end_io()
872 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); in rbio_orig_end_io()
874 free_raid_bio(rbio); in rbio_orig_end_io()
891 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io() local
895 fail_bio_stripe(rbio, bio); in raid_write_end_io()
899 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_end_io()
905 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_write_end_io()
908 rbio_orig_end_io(rbio, err); in raid_write_end_io()
928 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, in page_in_rbio() argument
934 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr; in page_in_rbio()
936 spin_lock_irq(&rbio->bio_list_lock); in page_in_rbio()
937 p = rbio->bio_pages[chunk_page]; in page_in_rbio()
938 spin_unlock_irq(&rbio->bio_list_lock); in page_in_rbio()
943 return rbio->stripe_pages[chunk_page]; in page_in_rbio()
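
page_in_rbio() prefers a page queued in the bio list over the corresponding stripe page, and only falls back to the stripe page when the bio list has nothing for that slot; the real function also takes the 0/1 flag visible in its callers, which appears to restrict the lookup to bio pages, and that flag is dropped in the simplified, illustrative sketch below.

#include <stdio.h>

#define TOY_PAGE_SHIFT 12
struct toy_page { int id; };

static struct toy_page *toy_page_in_rbio(struct toy_page **bio_pages,
					 struct toy_page **stripe_pages,
					 int stripe_len, int stripe, int pagenr)
{
	int chunk_page = stripe * (stripe_len >> TOY_PAGE_SHIFT) + pagenr;

	/* fresh data queued in the bio list takes precedence ... */
	if (bio_pages[chunk_page])
		return bio_pages[chunk_page];
	/* ... otherwise use the page read from (or cached for) the stripe */
	return stripe_pages[chunk_page];
}

int main(void)
{
	struct toy_page cached = { 1 }, fresh = { 2 };
	struct toy_page *bio_pages[16] = { 0 }, *stripe_pages[16] = { 0 };
	int stripe_len = 16 * 1024;             /* 4 pages per stripe in this toy */

	stripe_pages[5] = &cached;              /* stripe 1, page 1 */
	printf("got page %d\n", toy_page_in_rbio(bio_pages, stripe_pages,
						 stripe_len, 1, 1)->id);   /* 1 */

	bio_pages[5] = &fresh;
	printf("got page %d\n", toy_page_in_rbio(bio_pages, stripe_pages,
						 stripe_len, 1, 1)->id);   /* 2 */
	return 0;
}
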
963 struct btrfs_raid_bio *rbio; in alloc_rbio() local
970 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 + in alloc_rbio()
973 if (!rbio) in alloc_rbio()
976 bio_list_init(&rbio->bio_list); in alloc_rbio()
977 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
978 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
979 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
980 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
981 rbio->bbio = bbio; in alloc_rbio()
982 rbio->fs_info = root->fs_info; in alloc_rbio()
983 rbio->stripe_len = stripe_len; in alloc_rbio()
984 rbio->nr_pages = num_pages; in alloc_rbio()
985 rbio->real_stripes = real_stripes; in alloc_rbio()
986 rbio->stripe_npages = stripe_npages; in alloc_rbio()
987 rbio->faila = -1; in alloc_rbio()
988 rbio->failb = -1; in alloc_rbio()
989 atomic_set(&rbio->refs, 1); in alloc_rbio()
990 atomic_set(&rbio->error, 0); in alloc_rbio()
991 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
997 p = rbio + 1; in alloc_rbio()
998 rbio->stripe_pages = p; in alloc_rbio()
999 rbio->bio_pages = p + sizeof(struct page *) * num_pages; in alloc_rbio()
1000 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; in alloc_rbio()
1009 rbio->nr_data = nr_data; in alloc_rbio()
1010 return rbio; in alloc_rbio()
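
alloc_rbio() makes a single allocation that holds the struct followed by the stripe_pages pointer array, the bio_pages pointer array, and the dbitmap, then carves the trailing arrays out of it (lines 997-1000 above). Here is a toy version of that carving with simplified stand-in types, not the kernel structures:

#include <stdio.h>
#include <stdlib.h>

struct page;                                /* opaque: only pointers are stored */

struct toy_rbio {
	int nr_pages;
	struct page **stripe_pages;         /* nr_pages entries */
	struct page **bio_pages;            /* nr_pages entries */
	unsigned long *dbitmap;             /* stripe_npages bits */
};

static struct toy_rbio *toy_alloc_rbio(int num_pages, size_t bitmap_bytes)
{
	struct toy_rbio *rbio;
	char *p;

	rbio = calloc(1, sizeof(*rbio) +
			 2 * num_pages * sizeof(struct page *) +
			 bitmap_bytes);
	if (!rbio)
		return NULL;

	rbio->nr_pages = num_pages;

	/* carve the trailing arrays out of the same allocation */
	p = (char *)(rbio + 1);
	rbio->stripe_pages = (struct page **)p;
	rbio->bio_pages    = (struct page **)(p + num_pages * sizeof(struct page *));
	rbio->dbitmap      = (unsigned long *)(p + 2 * num_pages * sizeof(struct page *));
	return rbio;
}

int main(void)
{
	struct toy_rbio *rbio = toy_alloc_rbio(64, 8);

	if (rbio)
		printf("struct %p, stripe_pages %p, bio_pages %p, dbitmap %p\n",
		       (void *)rbio, (void *)rbio->stripe_pages,
		       (void *)rbio->bio_pages, (void *)rbio->dbitmap);
	free(rbio);
	return 0;
}
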
1014 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_pages() argument
1019 for (i = 0; i < rbio->nr_pages; i++) { in alloc_rbio_pages()
1020 if (rbio->stripe_pages[i]) in alloc_rbio_pages()
1025 rbio->stripe_pages[i] = page; in alloc_rbio_pages()
1032 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_parity_pages() argument
1037 i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT; in alloc_rbio_parity_pages()
1039 for (; i < rbio->nr_pages; i++) { in alloc_rbio_parity_pages()
1040 if (rbio->stripe_pages[i]) in alloc_rbio_parity_pages()
1045 rbio->stripe_pages[i] = page; in alloc_rbio_parity_pages()
1055 static int rbio_add_io_page(struct btrfs_raid_bio *rbio, in rbio_add_io_page() argument
1069 stripe = &rbio->bbio->stripes[stripe_nr]; in rbio_add_io_page()
1074 return fail_rbio_index(rbio, stripe_nr); in rbio_add_io_page()
1115 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) in validate_rbio_for_rmw() argument
1117 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_rmw()
1118 BUG_ON(rbio->faila == rbio->real_stripes - 1); in validate_rbio_for_rmw()
1119 __raid56_parity_recover(rbio); in validate_rbio_for_rmw()
1121 finish_rmw(rbio); in validate_rbio_for_rmw()
1129 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page) in rbio_stripe_page() argument
1132 index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT); in rbio_stripe_page()
1134 return rbio->stripe_pages[index]; in rbio_stripe_page()
1145 static void index_rbio_pages(struct btrfs_raid_bio *rbio) in index_rbio_pages() argument
1154 spin_lock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1155 bio_list_for_each(bio, &rbio->bio_list) { in index_rbio_pages()
1157 stripe_offset = start - rbio->bbio->raid_map[0]; in index_rbio_pages()
1162 rbio->bio_pages[page_index + i] = p; in index_rbio_pages()
1165 spin_unlock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1176 static noinline void finish_rmw(struct btrfs_raid_bio *rbio) in finish_rmw() argument
1178 struct btrfs_bio *bbio = rbio->bbio; in finish_rmw()
1179 void *pointers[rbio->real_stripes]; in finish_rmw()
1180 int stripe_len = rbio->stripe_len; in finish_rmw()
1181 int nr_data = rbio->nr_data; in finish_rmw()
1193 if (rbio->real_stripes - rbio->nr_data == 1) { in finish_rmw()
1194 p_stripe = rbio->real_stripes - 1; in finish_rmw()
1195 } else if (rbio->real_stripes - rbio->nr_data == 2) { in finish_rmw()
1196 p_stripe = rbio->real_stripes - 2; in finish_rmw()
1197 q_stripe = rbio->real_stripes - 1; in finish_rmw()
1210 spin_lock_irq(&rbio->bio_list_lock); in finish_rmw()
1211 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in finish_rmw()
1212 spin_unlock_irq(&rbio->bio_list_lock); in finish_rmw()
1214 atomic_set(&rbio->error, 0); in finish_rmw()
1225 index_rbio_pages(rbio); in finish_rmw()
1226 if (!rbio_is_full(rbio)) in finish_rmw()
1227 cache_rbio_pages(rbio); in finish_rmw()
1229 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_rmw()
1235 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_rmw()
1240 p = rbio_pstripe_page(rbio, pagenr); in finish_rmw()
1250 p = rbio_qstripe_page(rbio, pagenr); in finish_rmw()
1254 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_rmw()
1263 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_rmw()
1264 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_rmw()
1272 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1275 if (stripe < rbio->nr_data) { in finish_rmw()
1276 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1280 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1283 ret = rbio_add_io_page(rbio, &bio_list, in finish_rmw()
1284 page, stripe, pagenr, rbio->stripe_len); in finish_rmw()
1293 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in finish_rmw()
1299 if (stripe < rbio->nr_data) { in finish_rmw()
1300 page = page_in_rbio(rbio, stripe, pagenr, 1); in finish_rmw()
1304 page = rbio_stripe_page(rbio, stripe, pagenr); in finish_rmw()
1307 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_rmw()
1308 rbio->bbio->tgtdev_map[stripe], in finish_rmw()
1309 pagenr, rbio->stripe_len); in finish_rmw()
1316 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); in finish_rmw()
1317 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); in finish_rmw()
1324 bio->bi_private = rbio; in finish_rmw()
1331 rbio_orig_end_io(rbio, -EIO); in finish_rmw()
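
The parity math in finish_rmw() reduces to: the P page is the byte-wise XOR of the corresponding page from every data stripe, and for RAID6 the Q page additionally comes from raid6_call.gen_syndrome(). Below is a self-contained sketch of the XOR part with made-up sizes (not the kernel's run_xor()), including why a single lost data stripe is recoverable from the survivors plus P:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096
#define TOY_NR_DATA   3

/* P = D0 ^ D1 ^ ... ^ D(nr_data-1), byte by byte */
static void xor_parity(uint8_t *parity, uint8_t data[][TOY_PAGE_SIZE], int nr_data)
{
	memcpy(parity, data[0], TOY_PAGE_SIZE);
	for (int d = 1; d < nr_data; d++)
		for (int i = 0; i < TOY_PAGE_SIZE; i++)
			parity[i] ^= data[d][i];
}

int main(void)
{
	static uint8_t data[TOY_NR_DATA][TOY_PAGE_SIZE];
	static uint8_t parity[TOY_PAGE_SIZE];
	static uint8_t rebuilt[TOY_PAGE_SIZE];

	for (int d = 0; d < TOY_NR_DATA; d++)
		memset(data[d], 0x11 * (d + 1), TOY_PAGE_SIZE);

	xor_parity(parity, data, TOY_NR_DATA);

	/* lose data stripe 1: XOR of the survivors and P gives it back */
	memcpy(rebuilt, parity, TOY_PAGE_SIZE);
	for (int i = 0; i < TOY_PAGE_SIZE; i++)
		rebuilt[i] ^= data[0][i] ^ data[2][i];

	printf("stripe 1 recovered: %s\n",
	       memcmp(rebuilt, data[1], TOY_PAGE_SIZE) == 0 ? "yes" : "no");
	return 0;
}
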
1339 static int find_bio_stripe(struct btrfs_raid_bio *rbio, in find_bio_stripe() argument
1349 for (i = 0; i < rbio->bbio->num_stripes; i++) { in find_bio_stripe()
1350 stripe = &rbio->bbio->stripes[i]; in find_bio_stripe()
1353 physical < stripe_start + rbio->stripe_len && in find_bio_stripe()
1366 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, in find_logical_bio_stripe() argument
1375 for (i = 0; i < rbio->nr_data; i++) { in find_logical_bio_stripe()
1376 stripe_start = rbio->bbio->raid_map[i]; in find_logical_bio_stripe()
1378 logical < stripe_start + rbio->stripe_len) { in find_logical_bio_stripe()
1388 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) in fail_rbio_index() argument
1393 spin_lock_irqsave(&rbio->bio_list_lock, flags); in fail_rbio_index()
1396 if (rbio->faila == failed || rbio->failb == failed) in fail_rbio_index()
1399 if (rbio->faila == -1) { in fail_rbio_index()
1401 rbio->faila = failed; in fail_rbio_index()
1402 atomic_inc(&rbio->error); in fail_rbio_index()
1403 } else if (rbio->failb == -1) { in fail_rbio_index()
1405 rbio->failb = failed; in fail_rbio_index()
1406 atomic_inc(&rbio->error); in fail_rbio_index()
1411 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in fail_rbio_index()
1420 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, in fail_bio_stripe() argument
1423 int failed = find_bio_stripe(rbio, bio); in fail_bio_stripe()
1428 return fail_rbio_index(rbio, failed); in fail_bio_stripe()
1456 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_rmw_end_io() local
1459 fail_bio_stripe(rbio, bio); in raid_rmw_end_io()
1465 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_rmw_end_io()
1468 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_rmw_end_io()
1476 validate_rbio_for_rmw(rbio); in raid_rmw_end_io()
1481 rbio_orig_end_io(rbio, -EIO); in raid_rmw_end_io()
1484 static void async_rmw_stripe(struct btrfs_raid_bio *rbio) in async_rmw_stripe() argument
1486 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_rmw_stripe()
1489 btrfs_queue_work(rbio->fs_info->rmw_workers, in async_rmw_stripe()
1490 &rbio->work); in async_rmw_stripe()
1493 static void async_read_rebuild(struct btrfs_raid_bio *rbio) in async_read_rebuild() argument
1495 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_read_rebuild()
1498 btrfs_queue_work(rbio->fs_info->rmw_workers, in async_read_rebuild()
1499 &rbio->work); in async_read_rebuild()
1506 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) in raid56_rmw_stripe() argument
1511 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); in raid56_rmw_stripe()
1518 ret = alloc_rbio_pages(rbio); in raid56_rmw_stripe()
1522 index_rbio_pages(rbio); in raid56_rmw_stripe()
1524 atomic_set(&rbio->error, 0); in raid56_rmw_stripe()
1529 for (stripe = 0; stripe < rbio->nr_data; stripe++) { in raid56_rmw_stripe()
1538 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_rmw_stripe()
1542 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_rmw_stripe()
1550 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_rmw_stripe()
1551 stripe, pagenr, rbio->stripe_len); in raid56_rmw_stripe()
1572 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_rmw_stripe()
1578 bio->bi_private = rbio; in raid56_rmw_stripe()
1581 btrfs_bio_wq_end_io(rbio->fs_info, bio, in raid56_rmw_stripe()
1590 rbio_orig_end_io(rbio, -EIO); in raid56_rmw_stripe()
1594 validate_rbio_for_rmw(rbio); in raid56_rmw_stripe()
1602 static int full_stripe_write(struct btrfs_raid_bio *rbio) in full_stripe_write() argument
1606 ret = alloc_rbio_parity_pages(rbio); in full_stripe_write()
1608 __free_raid_bio(rbio); in full_stripe_write()
1612 ret = lock_stripe_add(rbio); in full_stripe_write()
1614 finish_rmw(rbio); in full_stripe_write()
1623 static int partial_stripe_write(struct btrfs_raid_bio *rbio) in partial_stripe_write() argument
1627 ret = lock_stripe_add(rbio); in partial_stripe_write()
1629 async_rmw_stripe(rbio); in partial_stripe_write()
1639 static int __raid56_parity_write(struct btrfs_raid_bio *rbio) in __raid56_parity_write() argument
1642 if (!rbio_is_full(rbio)) in __raid56_parity_write()
1643 return partial_stripe_write(rbio); in __raid56_parity_write()
1644 return full_stripe_write(rbio); in __raid56_parity_write()
1750 struct btrfs_raid_bio *rbio; in raid56_parity_write() local
1755 rbio = alloc_rbio(root, bbio, stripe_len); in raid56_parity_write()
1756 if (IS_ERR(rbio)) { in raid56_parity_write()
1758 return PTR_ERR(rbio); in raid56_parity_write()
1760 bio_list_add(&rbio->bio_list, bio); in raid56_parity_write()
1761 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_write()
1762 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1765 rbio->generic_bio_cnt = 1; in raid56_parity_write()
1771 if (rbio_is_full(rbio)) { in raid56_parity_write()
1772 ret = full_stripe_write(rbio); in raid56_parity_write()
1786 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1789 ret = __raid56_parity_write(rbio); in raid56_parity_write()
1801 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) in __raid_recover_end_io() argument
1806 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); in __raid_recover_end_io()
1811 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1817 faila = rbio->faila; in __raid_recover_end_io()
1818 failb = rbio->failb; in __raid_recover_end_io()
1820 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1821 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1822 spin_lock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1823 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in __raid_recover_end_io()
1824 spin_unlock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1827 index_rbio_pages(rbio); in __raid_recover_end_io()
1834 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in __raid_recover_end_io()
1835 !test_bit(pagenr, rbio->dbitmap)) in __raid_recover_end_io()
1841 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1846 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1847 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1849 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1851 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1857 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1863 if (faila == rbio->nr_data) { in __raid_recover_end_io()
1892 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1893 if (rbio->bbio->raid_map[faila] == in __raid_recover_end_io()
1905 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
1906 raid6_datap_recov(rbio->real_stripes, in __raid_recover_end_io()
1909 raid6_2data_recov(rbio->real_stripes, in __raid_recover_end_io()
1921 pointers[rbio->nr_data], in __raid_recover_end_io()
1926 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
1928 pointers[rbio->nr_data - 1] = p; in __raid_recover_end_io()
1931 run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); in __raid_recover_end_io()
1939 if (rbio->operation == BTRFS_RBIO_WRITE) { in __raid_recover_end_io()
1942 page = rbio_stripe_page(rbio, faila, i); in __raid_recover_end_io()
1946 page = rbio_stripe_page(rbio, failb, i); in __raid_recover_end_io()
1951 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1956 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1957 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1959 page = page_in_rbio(rbio, stripe, pagenr, 0); in __raid_recover_end_io()
1961 page = rbio_stripe_page(rbio, stripe, pagenr); in __raid_recover_end_io()
1972 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { in __raid_recover_end_io()
1974 cache_rbio_pages(rbio); in __raid_recover_end_io()
1976 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in __raid_recover_end_io()
1978 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
1979 } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1980 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
1982 rbio->faila = -1; in __raid_recover_end_io()
1983 rbio->failb = -1; in __raid_recover_end_io()
1985 if (rbio->operation == BTRFS_RBIO_WRITE) in __raid_recover_end_io()
1986 finish_rmw(rbio); in __raid_recover_end_io()
1987 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) in __raid_recover_end_io()
1988 finish_parity_scrub(rbio, 0); in __raid_recover_end_io()
1992 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
2002 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_recover_end_io() local
2009 fail_bio_stripe(rbio, bio); in raid_recover_end_io()
2014 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_recover_end_io()
2017 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in raid_recover_end_io()
2018 rbio_orig_end_io(rbio, -EIO); in raid_recover_end_io()
2020 __raid_recover_end_io(rbio); in raid_recover_end_io()
2031 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) in __raid56_parity_recover() argument
2036 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); in __raid56_parity_recover()
2043 ret = alloc_rbio_pages(rbio); in __raid56_parity_recover()
2047 atomic_set(&rbio->error, 0); in __raid56_parity_recover()
2054 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid56_parity_recover()
2055 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2056 atomic_inc(&rbio->error); in __raid56_parity_recover()
2067 p = rbio_stripe_page(rbio, stripe, pagenr); in __raid56_parity_recover()
2071 ret = rbio_add_io_page(rbio, &bio_list, in __raid56_parity_recover()
2072 rbio_stripe_page(rbio, stripe, pagenr), in __raid56_parity_recover()
2073 stripe, pagenr, rbio->stripe_len); in __raid56_parity_recover()
2086 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { in __raid56_parity_recover()
2087 __raid_recover_end_io(rbio); in __raid56_parity_recover()
2098 atomic_set(&rbio->stripes_pending, bios_to_read); in __raid56_parity_recover()
2104 bio->bi_private = rbio; in __raid56_parity_recover()
2107 btrfs_bio_wq_end_io(rbio->fs_info, bio, in __raid56_parity_recover()
2116 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid56_parity_recover()
2117 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) in __raid56_parity_recover()
2118 rbio_orig_end_io(rbio, -EIO); in __raid56_parity_recover()
2132 struct btrfs_raid_bio *rbio; in raid56_parity_recover() local
2135 rbio = alloc_rbio(root, bbio, stripe_len); in raid56_parity_recover()
2136 if (IS_ERR(rbio)) { in raid56_parity_recover()
2139 return PTR_ERR(rbio); in raid56_parity_recover()
2142 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2143 bio_list_add(&rbio->bio_list, bio); in raid56_parity_recover()
2144 rbio->bio_list_bytes = bio->bi_iter.bi_size; in raid56_parity_recover()
2146 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_parity_recover()
2147 if (rbio->faila == -1) { in raid56_parity_recover()
2151 kfree(rbio); in raid56_parity_recover()
2157 rbio->generic_bio_cnt = 1; in raid56_parity_recover()
2167 rbio->failb = rbio->real_stripes - 2; in raid56_parity_recover()
2169 ret = lock_stripe_add(rbio); in raid56_parity_recover()
2179 __raid56_parity_recover(rbio); in raid56_parity_recover()
2191 struct btrfs_raid_bio *rbio; in rmw_work() local
2193 rbio = container_of(work, struct btrfs_raid_bio, work); in rmw_work()
2194 raid56_rmw_stripe(rbio); in rmw_work()
2199 struct btrfs_raid_bio *rbio; in read_rebuild_work() local
2201 rbio = container_of(work, struct btrfs_raid_bio, work); in read_rebuild_work()
2202 __raid56_parity_recover(rbio); in read_rebuild_work()
2219 struct btrfs_raid_bio *rbio; in raid56_parity_alloc_scrub_rbio() local
2222 rbio = alloc_rbio(root, bbio, stripe_len); in raid56_parity_alloc_scrub_rbio()
2223 if (IS_ERR(rbio)) in raid56_parity_alloc_scrub_rbio()
2225 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2231 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2233 for (i = 0; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2235 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2242 ASSERT(rbio->stripe_npages == stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2243 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2245 return rbio; in raid56_parity_alloc_scrub_rbio()
2249 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, in raid56_add_scrub_pages() argument
2255 ASSERT(logical >= rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2256 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + in raid56_add_scrub_pages()
2257 rbio->stripe_len * rbio->nr_data); in raid56_add_scrub_pages()
2258 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); in raid56_add_scrub_pages()
2260 rbio->bio_pages[index] = page; in raid56_add_scrub_pages()
2267 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_essential_pages() argument
2274 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { in alloc_rbio_essential_pages()
2275 for (i = 0; i < rbio->real_stripes; i++) { in alloc_rbio_essential_pages()
2276 index = i * rbio->stripe_npages + bit; in alloc_rbio_essential_pages()
2277 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2283 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2296 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_parity_end_io() local
2300 fail_bio_stripe(rbio, bio); in raid_write_parity_end_io()
2304 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_parity_end_io()
2309 if (atomic_read(&rbio->error)) in raid_write_parity_end_io()
2312 rbio_orig_end_io(rbio, err); in raid_write_parity_end_io()
2315 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, in finish_parity_scrub() argument
2318 struct btrfs_bio *bbio = rbio->bbio; in finish_parity_scrub()
2319 void *pointers[rbio->real_stripes]; in finish_parity_scrub()
2320 DECLARE_BITMAP(pbitmap, rbio->stripe_npages); in finish_parity_scrub()
2321 int nr_data = rbio->nr_data; in finish_parity_scrub()
2335 if (rbio->real_stripes - rbio->nr_data == 1) { in finish_parity_scrub()
2336 p_stripe = rbio->real_stripes - 1; in finish_parity_scrub()
2337 } else if (rbio->real_stripes - rbio->nr_data == 2) { in finish_parity_scrub()
2338 p_stripe = rbio->real_stripes - 2; in finish_parity_scrub()
2339 q_stripe = rbio->real_stripes - 1; in finish_parity_scrub()
2344 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2346 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); in finish_parity_scrub()
2354 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2373 atomic_set(&rbio->error, 0); in finish_parity_scrub()
2375 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2380 p = page_in_rbio(rbio, stripe, pagenr, 0); in finish_parity_scrub()
2395 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, in finish_parity_scrub()
2404 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2406 if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE)) in finish_parity_scrub()
2407 memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE); in finish_parity_scrub()
2410 bitmap_clear(rbio->dbitmap, pagenr, 1); in finish_parity_scrub()
2413 for (stripe = 0; stripe < rbio->real_stripes; stripe++) in finish_parity_scrub()
2414 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); in finish_parity_scrub()
2427 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2430 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2431 ret = rbio_add_io_page(rbio, &bio_list, in finish_parity_scrub()
2432 page, rbio->scrubp, pagenr, rbio->stripe_len); in finish_parity_scrub()
2440 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) { in finish_parity_scrub()
2443 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); in finish_parity_scrub()
2444 ret = rbio_add_io_page(rbio, &bio_list, page, in finish_parity_scrub()
2445 bbio->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2446 pagenr, rbio->stripe_len); in finish_parity_scrub()
2455 rbio_orig_end_io(rbio, 0); in finish_parity_scrub()
2459 atomic_set(&rbio->stripes_pending, nr_data); in finish_parity_scrub()
2466 bio->bi_private = rbio; in finish_parity_scrub()
2473 rbio_orig_end_io(rbio, -EIO); in finish_parity_scrub()
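
finish_parity_scrub() recomputes parity from the data pages, compares the result to the page currently on disk, and only keeps mismatching pages in dbitmap for writeback (the memcmp/memcpy at 2406-2407; matching pages are cleared from the bitmap at 2410 so no write bio is issued for them). A tiny stand-alone sketch of that verify-then-rewrite decision, with illustrative buffers:

#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096

/* returns 1 if the on-disk parity page had to be corrected (i.e. needs a write) */
static int scrub_one_parity_page(unsigned char *ondisk, const unsigned char *computed)
{
	if (memcmp(ondisk, computed, TOY_PAGE_SIZE) == 0)
		return 0;                            /* clean: drop from dbitmap, no I/O */
	memcpy(ondisk, computed, TOY_PAGE_SIZE);     /* dirty: rewrite this parity page */
	return 1;
}

int main(void)
{
	static unsigned char ondisk[TOY_PAGE_SIZE], computed[TOY_PAGE_SIZE];

	memset(computed, 0xAB, sizeof(computed));
	memset(ondisk, 0xAB, sizeof(ondisk));
	printf("clean page rewritten? %d\n", scrub_one_parity_page(ondisk, computed));

	ondisk[100] ^= 0xFF;                         /* simulate a corrupted parity byte */
	printf("corrupt page rewritten? %d\n", scrub_one_parity_page(ondisk, computed));
	return 0;
}
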
2476 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2478 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2490 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) in validate_rbio_for_parity_scrub() argument
2492 if (atomic_read(&rbio->error) > rbio->bbio->max_errors) in validate_rbio_for_parity_scrub()
2495 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_parity_scrub()
2498 if (is_data_stripe(rbio, rbio->faila)) in validate_rbio_for_parity_scrub()
2500 else if (is_parity_stripe(rbio->faila)) in validate_rbio_for_parity_scrub()
2501 failp = rbio->faila; in validate_rbio_for_parity_scrub()
2503 if (is_data_stripe(rbio, rbio->failb)) in validate_rbio_for_parity_scrub()
2505 else if (is_parity_stripe(rbio->failb)) in validate_rbio_for_parity_scrub()
2506 failp = rbio->failb; in validate_rbio_for_parity_scrub()
2513 if (dfail > rbio->bbio->max_errors - 1) in validate_rbio_for_parity_scrub()
2521 finish_parity_scrub(rbio, 0); in validate_rbio_for_parity_scrub()
2531 if (failp != rbio->scrubp) in validate_rbio_for_parity_scrub()
2534 __raid_recover_end_io(rbio); in validate_rbio_for_parity_scrub()
2536 finish_parity_scrub(rbio, 1); in validate_rbio_for_parity_scrub()
2541 rbio_orig_end_io(rbio, -EIO); in validate_rbio_for_parity_scrub()
2554 struct btrfs_raid_bio *rbio = bio->bi_private; in raid56_parity_scrub_end_io() local
2557 fail_bio_stripe(rbio, bio); in raid56_parity_scrub_end_io()
2563 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid56_parity_scrub_end_io()
2571 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_end_io()
2574 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) in raid56_parity_scrub_stripe() argument
2583 ret = alloc_rbio_essential_pages(rbio); in raid56_parity_scrub_stripe()
2589 atomic_set(&rbio->error, 0); in raid56_parity_scrub_stripe()
2594 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in raid56_parity_scrub_stripe()
2595 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { in raid56_parity_scrub_stripe()
2603 page = page_in_rbio(rbio, stripe, pagenr, 1); in raid56_parity_scrub_stripe()
2607 page = rbio_stripe_page(rbio, stripe, pagenr); in raid56_parity_scrub_stripe()
2615 ret = rbio_add_io_page(rbio, &bio_list, page, in raid56_parity_scrub_stripe()
2616 stripe, pagenr, rbio->stripe_len); in raid56_parity_scrub_stripe()
2637 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_parity_scrub_stripe()
2643 bio->bi_private = rbio; in raid56_parity_scrub_stripe()
2646 btrfs_bio_wq_end_io(rbio->fs_info, bio, in raid56_parity_scrub_stripe()
2655 rbio_orig_end_io(rbio, -EIO); in raid56_parity_scrub_stripe()
2659 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_stripe()
2664 struct btrfs_raid_bio *rbio; in scrub_parity_work() local
2666 rbio = container_of(work, struct btrfs_raid_bio, work); in scrub_parity_work()
2667 raid56_parity_scrub_stripe(rbio); in scrub_parity_work()
2670 static void async_scrub_parity(struct btrfs_raid_bio *rbio) in async_scrub_parity() argument
2672 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_scrub_parity()
2675 btrfs_queue_work(rbio->fs_info->rmw_workers, in async_scrub_parity()
2676 &rbio->work); in async_scrub_parity()
2679 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) in raid56_parity_submit_scrub_rbio() argument
2681 if (!lock_stripe_add(rbio)) in raid56_parity_submit_scrub_rbio()
2682 async_scrub_parity(rbio); in raid56_parity_submit_scrub_rbio()
2691 struct btrfs_raid_bio *rbio; in raid56_alloc_missing_rbio() local
2693 rbio = alloc_rbio(root, bbio, length); in raid56_alloc_missing_rbio()
2694 if (IS_ERR(rbio)) in raid56_alloc_missing_rbio()
2697 rbio->operation = BTRFS_RBIO_REBUILD_MISSING; in raid56_alloc_missing_rbio()
2698 bio_list_add(&rbio->bio_list, bio); in raid56_alloc_missing_rbio()
2705 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_alloc_missing_rbio()
2706 if (rbio->faila == -1) { in raid56_alloc_missing_rbio()
2708 kfree(rbio); in raid56_alloc_missing_rbio()
2712 return rbio; in raid56_alloc_missing_rbio()
2717 struct btrfs_raid_bio *rbio; in missing_raid56_work() local
2719 rbio = container_of(work, struct btrfs_raid_bio, work); in missing_raid56_work()
2720 __raid56_parity_recover(rbio); in missing_raid56_work()
2723 static void async_missing_raid56(struct btrfs_raid_bio *rbio) in async_missing_raid56() argument
2725 btrfs_init_work(&rbio->work, btrfs_rmw_helper, in async_missing_raid56()
2728 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work); in async_missing_raid56()
2731 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio) in raid56_submit_missing_rbio() argument
2733 if (!lock_stripe_add(rbio)) in raid56_submit_missing_rbio()
2734 async_missing_raid56(rbio); in raid56_submit_missing_rbio()