This source file includes the following definitions:
- start_async_work
- btrfs_alloc_stripe_hash_table
- cache_rbio_pages
- rbio_bucket
- steal_rbio
- merge_rbio
- __remove_rbio_from_cache
- remove_rbio_from_cache
- btrfs_clear_rbio_cache
- btrfs_free_stripe_hash_table
- cache_rbio
- run_xor
- rbio_is_full
- rbio_can_merge
- rbio_stripe_page_index
- rbio_stripe_page
- rbio_pstripe_page
- rbio_qstripe_page
- lock_stripe_add
- unlock_stripe
- __free_raid_bio
- rbio_endio_bio_list
- rbio_orig_end_io
- raid_write_end_io
- page_in_rbio
- rbio_nr_pages
- alloc_rbio
- alloc_rbio_pages
- alloc_rbio_parity_pages
- rbio_add_io_page
- validate_rbio_for_rmw
- index_rbio_pages
- finish_rmw
- find_bio_stripe
- find_logical_bio_stripe
- fail_rbio_index
- fail_bio_stripe
- set_bio_pages_uptodate
- raid_rmw_end_io
- raid56_rmw_stripe
- full_stripe_write
- partial_stripe_write
- __raid56_parity_write
- plug_cmp
- run_plug
- unplug_work
- btrfs_raid_unplug
- raid56_parity_write
- __raid_recover_end_io
- raid_recover_end_io
- __raid56_parity_recover
- raid56_parity_recover
- rmw_work
- read_rebuild_work
- raid56_parity_alloc_scrub_rbio
- raid56_add_scrub_pages
- alloc_rbio_essential_pages
- finish_parity_scrub
- is_data_stripe
- validate_rbio_for_parity_scrub
- raid56_parity_scrub_end_io
- raid56_parity_scrub_stripe
- scrub_parity_work
- raid56_parity_submit_scrub_rbio
- raid56_alloc_missing_rbio
- raid56_submit_missing_rbio
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2012 Fusion-io  All rights reserved.
4  * Copyright (C) 2012 Intel Corp. All rights reserved.
5  */
6
7 #include <linux/sched.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
15 #include <linux/mm.h>
16 #include "ctree.h"
17 #include "disk-io.h"
18 #include "volumes.h"
19 #include "raid56.h"
20 #include "async-thread.h"
21
22 /* set when additional merges to this rbio are not allowed */
23 #define RBIO_RMW_LOCKED_BIT 1
24
25 /*
26  * set when this rbio is sitting in the hash, but it is just a
27  * cache of past RMW
28  */
29 #define RBIO_CACHE_BIT 2
30
31 /* set when it is safe to trust the data we have
32  * for this particular rbio
33  */
34 #define RBIO_CACHE_READY_BIT 3
35
36 #define RBIO_CACHE_SIZE 1024
37
38 #define BTRFS_STRIPE_HASH_TABLE_BITS 11
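/*
 * 11 bits gives the hash table 1 << 11 == 2048 buckets; each bucket is
 * just a list_head plus a spinlock, so the table stays small enough to
 * allocate once per mount.
 */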
39
40 /* one bucket of the stripe hash table; the lock protects hash_list */
41 struct btrfs_stripe_hash {
42 struct list_head hash_list;
43 spinlock_t lock;
44 };
45
46 /* the hash table itself, plus the LRU of cached full stripes */
47 struct btrfs_stripe_hash_table {
48 struct list_head stripe_cache;
49 spinlock_t cache_lock;
50 int cache_size;
51 struct btrfs_stripe_hash table[];
52 };
53
54 enum btrfs_rbio_ops {
55 BTRFS_RBIO_WRITE,
56 BTRFS_RBIO_READ_REBUILD,
57 BTRFS_RBIO_PARITY_SCRUB,
58 BTRFS_RBIO_REBUILD_MISSING,
59 };
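/*
 * BTRFS_RBIO_WRITE is a plain (possibly read-modify-write) write,
 * BTRFS_RBIO_READ_REBUILD recovers data for a failed read,
 * BTRFS_RBIO_PARITY_SCRUB verifies and repairs parity in place, and
 * BTRFS_RBIO_REBUILD_MISSING rebuilds the stripes of a missing device.
 */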
60
61 struct btrfs_raid_bio {
62 struct btrfs_fs_info *fs_info;
63 struct btrfs_bio *bbio;
64
65 /*
66  * while we're doing rmw on a stripe we put it into a hash table
67  * so we can lock the stripe and merge more rbios into it
68  */
69
70 struct list_head hash_list;
71
72 /*
73  * LRU list for the stripe cache
74  */
75 struct list_head stripe_cache;
76
77 /*
78  * for scheduling work in the helper threads
79  */
80 struct btrfs_work work;
81
82 /*
83  * bio list and bio_list_lock are used to add more bios into the
84  * stripe in hopes of avoiding the full rmw
85  */
86
87 struct bio_list bio_list;
88 spinlock_t bio_list_lock;
89
90 /*
91  * also protected by the bio_list_lock, the plug list is used by
92  * the plugging code to collect partial bios while plugged.  The
93  * stripe locking code also uses it to hand off the stripe lock
94  * to the next pending IO
95  */
96 struct list_head plug_list;
97
98 /*
99  * flags that tell us if it is safe to merge with this bio
100  */
101
102 unsigned long flags;
103
104 /* size of each individual stripe on disk */
105 int stripe_len;
106
107 /* number of data stripes (no p/q) */
108 int nr_data;
109 /* number of stripes, not counting any replacement targets */
110 int real_stripes;
111 /* number of pages per stripe */
112 int stripe_npages;
113
114 /*
115  * set if we're doing a parity rebuild for a read from higher up,
116  * which is handled differently from a parity rebuild as part
117  * of rmw
118  */
119 enum btrfs_rbio_ops operation;
120
121 /* first bad stripe */
122 int faila;
123
124 /* second bad stripe (for raid6 use) */
125 int failb;
126 /* index of the parity stripe being scrubbed */
127 int scrubp;
128
129 /*
130  * number of pages needed to represent the full stripe
131  */
132 int nr_pages;
133
134 /*
135  * size of all the bios in the bio_list.  This helps us decide
136  * if the rbio maps to a full stripe or not
137  */
138
139 int bio_list_bytes;
140
141 int generic_bio_cnt;
142
143 refcount_t refs;
144
145 atomic_t stripes_pending;
146
147 atomic_t error;
148
149 /*
150  * these are two arrays of pointers.  We allocate the rbio big
151  * enough to hold them both, and set up their locations when the
152  * rbio is allocated
153  */
154
155 /*
156  * pages allocated for reading/writing the stripes directly from
157  * the disk (including P/Q)
158  */
159 struct page **stripe_pages;
160
161 /*
162  * pointers to the pages in the bio_list, stored here for faster
163  * lookup
164  */
165 struct page **bio_pages;
166
167 /* bitmap to record which horizontal stripe has data */
168 unsigned long *dbitmap;
169
170 /* allocated with real_stripes-many pointers for finish_*() calls */
171 void **finish_pointers;
172
173 /* allocated with stripe_npages-many bits for finish_*() calls */
174 unsigned long *finish_pbitmap;
175 };
176
177 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
178 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
179 static void rmw_work(struct btrfs_work *work);
180 static void read_rebuild_work(struct btrfs_work *work);
181 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
182 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
183 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
184 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
185 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
186
187 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
188 int need_check);
189 static void scrub_parity_work(struct btrfs_work *work);
190
191 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
192 {
193 btrfs_init_work(&rbio->work, work_func, NULL, NULL);
194 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
195 }
196
197
198 /* the stripe hash table is used for locking, and to collect
199  * bios in hopes of making a full stripe
200  */
201 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
202 {
203 struct btrfs_stripe_hash_table *table;
204 struct btrfs_stripe_hash_table *x;
205 struct btrfs_stripe_hash *cur;
206 struct btrfs_stripe_hash *h;
207 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
208 int i;
209 int table_size;
210
211 if (info->stripe_hash_table)
212 return 0;
213
214 /*
215  * The table is large, starting with order 4 and can go as high as
216  * order 7 in case lock debugging is turned on.
217  *
218  * Try harder to allocate and fallback to vmalloc to lower the chance
219  * of a failing mount.
220  */
221 table_size = sizeof(*table) + sizeof(*h) * num_entries;
222 table = kvzalloc(table_size, GFP_KERNEL);
223 if (!table)
224 return -ENOMEM;
225
226 spin_lock_init(&table->cache_lock);
227 INIT_LIST_HEAD(&table->stripe_cache);
228
229 h = table->table;
230
231 for (i = 0; i < num_entries; i++) {
232 cur = h + i;
233 INIT_LIST_HEAD(&cur->hash_list);
234 spin_lock_init(&cur->lock);
235 }
236
237 x = cmpxchg(&info->stripe_hash_table, NULL, table);
238 if (x)
239 kvfree(x);
240 return 0;
241 }
242
243
244 /*
245  * caching an rbio means to copy anything from the bio_pages array
246  * into the stripe_pages array.  We use the page uptodate bit in the
247  * stripe cache array to indicate if it has valid data
248  *
249  * once the caching is done, we set the cache ready bit.
250  */
251
252 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
253 {
254 int i;
255 char *s;
256 char *d;
257 int ret;
258
259 ret = alloc_rbio_pages(rbio);
260 if (ret)
261 return;
262
263 for (i = 0; i < rbio->nr_pages; i++) {
264 if (!rbio->bio_pages[i])
265 continue;
266
267 s = kmap(rbio->bio_pages[i]);
268 d = kmap(rbio->stripe_pages[i]);
269
270 copy_page(d, s);
271
272 kunmap(rbio->bio_pages[i]);
273 kunmap(rbio->stripe_pages[i]);
274 SetPageUptodate(rbio->stripe_pages[i]);
275 }
276 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
277 }
278
279
280
281 /* we hash on the first logical address of the stripe */
282 static int rbio_bucket(struct btrfs_raid_bio *rbio)
283 {
284 u64 num = rbio->bbio->raid_map[0];
285
286 /*
287  * we shift down quite a bit.  We're using byte addressing, and
288  * most of the lower bits are zeros.  This tends to upset hash_64,
289  * and it consistently returns just one or two different values.
290  *
291  * shifting off the lower bits fixes things.
292  */
293
294 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
295 }
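/*
 * A worked example of the shift above: full stripes starting at byte
 * offsets 0x100000 and 0x200000 differ only above bit 16, so after
 * "num >> 16" hash_64() sees 0x10 and 0x20 and can typically spread
 * them across different buckets.
 */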
296
297
298 /* stealing an rbio means taking all the uptodate pages out of the
299  * stripe array, and putting them into the destination stripe array
300  */
301 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
302 {
303 int i;
304 struct page *s;
305 struct page *d;
306
307 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
308 return;
309
310 for (i = 0; i < dest->nr_pages; i++) {
311 s = src->stripe_pages[i];
312 if (!s || !PageUptodate(s))
313 continue;
314
315
316 d = dest->stripe_pages[i];
317 if (d)
318 __free_page(d);
319
320 dest->stripe_pages[i] = s;
321 src->stripe_pages[i] = NULL;
322 }
323 }
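/*
 * Note that steal_rbio() moves the page pointers themselves rather
 * than copying data: the source slots are set to NULL so the stolen
 * pages are not freed twice when both rbios are released.
 */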
324
325
326 /*
327  * merging means we take the bio_list from the victim and splice it
328  * into the destination.  The victim should be discarded afterwards.
329  *
330  * must be called with dest->bio_list_lock held
331  */
332 static void merge_rbio(struct btrfs_raid_bio *dest,
333 struct btrfs_raid_bio *victim)
334 {
335 bio_list_merge(&dest->bio_list, &victim->bio_list);
336 dest->bio_list_bytes += victim->bio_list_bytes;
337 dest->generic_bio_cnt += victim->generic_bio_cnt;
338 bio_list_init(&victim->bio_list);
339 }
340
341
342 /* used to prune items that are in the cache.  The caller must
343  * hold the hash table lock.
344  */
345 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
346 {
347 int bucket = rbio_bucket(rbio);
348 struct btrfs_stripe_hash_table *table;
349 struct btrfs_stripe_hash *h;
350 int freeit = 0;
351
352
353
354
355 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
356 return;
357
358 table = rbio->fs_info->stripe_hash_table;
359 h = table->table + bucket;
360
361
362
363
364 spin_lock(&h->lock);
365
366
367
368
369
370 spin_lock(&rbio->bio_list_lock);
371
372 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
373 list_del_init(&rbio->stripe_cache);
374 table->cache_size -= 1;
375 freeit = 1;
376
377
378 /* if the bio list isn't empty, this rbio is still involved in an
379  * IO.  We take it out of the cache list, and drop the ref that
380  * was held for the list.
381  *
382  * If the bio_list was empty, we also remove the rbio from the
383  * hash_list, and drop the corresponding ref
384  */
385
386 if (bio_list_empty(&rbio->bio_list)) {
387 if (!list_empty(&rbio->hash_list)) {
388 list_del_init(&rbio->hash_list);
389 refcount_dec(&rbio->refs);
390 BUG_ON(!list_empty(&rbio->plug_list));
391 }
392 }
393 }
394
395 spin_unlock(&rbio->bio_list_lock);
396 spin_unlock(&h->lock);
397
398 if (freeit)
399 __free_raid_bio(rbio);
400 }
401
402
403
404 /* prune a given rbio from the cache */
405 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
406 {
407 struct btrfs_stripe_hash_table *table;
408 unsigned long flags;
409
410 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
411 return;
412
413 table = rbio->fs_info->stripe_hash_table;
414
415 spin_lock_irqsave(&table->cache_lock, flags);
416 __remove_rbio_from_cache(rbio);
417 spin_unlock_irqrestore(&table->cache_lock, flags);
418 }
419
420
421
422 /* remove everything in the cache */
423 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
424 {
425 struct btrfs_stripe_hash_table *table;
426 unsigned long flags;
427 struct btrfs_raid_bio *rbio;
428
429 table = info->stripe_hash_table;
430
431 spin_lock_irqsave(&table->cache_lock, flags);
432 while (!list_empty(&table->stripe_cache)) {
433 rbio = list_entry(table->stripe_cache.next,
434 struct btrfs_raid_bio,
435 stripe_cache);
436 __remove_rbio_from_cache(rbio);
437 }
438 spin_unlock_irqrestore(&table->cache_lock, flags);
439 }
440
441
442 /* remove all cached entries and free the hash table
443  * used by unmount
444  */
445 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
446 {
447 if (!info->stripe_hash_table)
448 return;
449 btrfs_clear_rbio_cache(info);
450 kvfree(info->stripe_hash_table);
451 info->stripe_hash_table = NULL;
452 }
453
454 /*
455  * insert an rbio into the stripe cache.  It
456  * must have already been prepared by calling
457  * cache_rbio_pages
458  *
459  * If this rbio was already cached, it gets
460  * moved to the front of the lru.
461  *
462  * If the size of the rbio cache is too big, we
463  * prune an item.
464  */
465 static void cache_rbio(struct btrfs_raid_bio *rbio)
466 {
467 struct btrfs_stripe_hash_table *table;
468 unsigned long flags;
469
470 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
471 return;
472
473 table = rbio->fs_info->stripe_hash_table;
474
475 spin_lock_irqsave(&table->cache_lock, flags);
476 spin_lock(&rbio->bio_list_lock);
477
478 /* bump our ref if we were not in the list before */
479 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
480 refcount_inc(&rbio->refs);
481
482 if (!list_empty(&rbio->stripe_cache)) {
483 list_move(&rbio->stripe_cache, &table->stripe_cache);
484 } else {
485 list_add(&rbio->stripe_cache, &table->stripe_cache);
486 table->cache_size += 1;
487 }
488
489 spin_unlock(&rbio->bio_list_lock);
490
491 if (table->cache_size > RBIO_CACHE_SIZE) {
492 struct btrfs_raid_bio *found;
493
494 found = list_entry(table->stripe_cache.prev,
495 struct btrfs_raid_bio,
496 stripe_cache);
497
498 if (found != rbio)
499 __remove_rbio_from_cache(found);
500 }
501
502 spin_unlock_irqrestore(&table->cache_lock, flags);
503 }
504
505
506 /*
507  * helper function to run the xor_blocks api.  It is only
508  * able to do MAX_XOR_BLOCKS at a time, so we need to loop through.
509  */
510 static void run_xor(void **pages, int src_cnt, ssize_t len)
511 {
512 int src_off = 0;
513 int xor_src_cnt = 0;
514 void *dest = pages[src_cnt];
515
516 while (src_cnt > 0) {
517 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
518 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
519
520 src_cnt -= xor_src_cnt;
521 src_off += xor_src_cnt;
522 }
523 }
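/*
 * Example of the chunking above: xor_blocks() takes at most
 * MAX_XOR_BLOCKS (4) sources per call, so with src_cnt == 6 the loop
 * runs twice (4 sources, then 2), accumulating into the same
 * destination page pages[src_cnt] each time.
 */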
524
525
526 /* returns true if the bio list inside this rbio covers an entire
527  * stripe (no rmw required)
528  */
529 static int rbio_is_full(struct btrfs_raid_bio *rbio)
530 {
531 unsigned long flags;
532 unsigned long size = rbio->bio_list_bytes;
533 int ret = 1;
534
535 spin_lock_irqsave(&rbio->bio_list_lock, flags);
536 if (size != rbio->nr_data * rbio->stripe_len)
537 ret = 0;
538 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
539 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
540
541 return ret;
542 }
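/*
 * "Full" here means bio_list_bytes equals nr_data * stripe_len: every
 * data byte in the stripe is being rewritten, so parity can be
 * computed without first reading anything back from disk.
 */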
543
544
545 /*
546  * returns 1 if it is safe to merge two rbios together.  The merging
547  * is safe if the two rbios correspond to the same stripe and if they
548  * are both going in the same direction (read vs write), and if
549  * neither one is locked for final IO
550  *
551  * The caller is responsible for locking such that rmw_locked is safe
552  * to test
553  */
554 static int rbio_can_merge(struct btrfs_raid_bio *last,
555 struct btrfs_raid_bio *cur)
556 {
557 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
558 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
559 return 0;
560
561 /*
562  * we can't merge with cached rbios, since the idea is that when
563  * we merge the destination rbio is going to run our IO for us.
564  * We can steal from cached rbios though, other functions handle
565  * that.
566  */
567
568 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
569 test_bit(RBIO_CACHE_BIT, &cur->flags))
570 return 0;
571
572 if (last->bbio->raid_map[0] !=
573 cur->bbio->raid_map[0])
574 return 0;
575
576
577 if (last->operation != cur->operation)
578 return 0;
579
580 /*
581  * a parity scrub has read the full stripe from the drive so it
582  * can check and repair the parity in place.  We're not allowed
583  * to add any new bios to its bio list, anyone else that wants to
584  * change this stripe needs to do their own rmw.
585  */
586
587 if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
588 return 0;
589
590 if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
591 return 0;
592
593 if (last->operation == BTRFS_RBIO_READ_REBUILD) {
594 int fa = last->faila;
595 int fb = last->failb;
596 int cur_fa = cur->faila;
597 int cur_fb = cur->failb;
598
599 if (last->faila >= last->failb) {
600 fa = last->failb;
601 fb = last->faila;
602 }
603
604 if (cur->faila >= cur->failb) {
605 cur_fa = cur->failb;
606 cur_fb = cur->faila;
607 }
608
609 if (fa != cur_fa || fb != cur_fb)
610 return 0;
611 }
612 return 1;
613 }
614
615 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
616 int index)
617 {
618 return stripe * rbio->stripe_npages + index;
619 }
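/*
 * stripe_pages is laid out as real_stripes consecutive runs of
 * stripe_npages pages each: the data stripes first, then P, then Q for
 * raid6, which is why the index is stripe * stripe_npages + index.
 */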
620
621
622 /* these are just the pages from the rbio array, not from anything
623  * the FS sent down to us
624  */
625 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
626 int index)
627 {
628 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
629 }
630
631
632
633 /* helper to index into the pstripe */
634 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
635 {
636 return rbio_stripe_page(rbio, rbio->nr_data, index);
637 }
638
639
640 /* helper to index into the qstripe, returns null
641  * if there is no qstripe
642  */
643 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
644 {
645 if (rbio->nr_data + 1 == rbio->real_stripes)
646 return NULL;
647 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
648 }
649
650 /*
651  * The first stripe in the table for a logical address has the lock.
652  * rbios are added in one of three ways:
653  *
654  * 1) Nobody has the stripe locked yet.  The rbio is given the lock
655  * and 0 is returned.  The caller must start the IO themselves.
656  *
657  * 2) Someone has the stripe locked, but we're able to merge our bio
658  * with theirs.  In this case, 1 is returned and the caller should
659  * never start the io.  Instead their bios will be sent down with
660  * the existing lock holder's.
661  *
662  * 3) Someone has the stripe locked, but we're not able to merge.
663  * The rbio is added to the lock owner's plug list, or merged into
664  * an rbio found on the plug list.  When the lock owner unlocks,
665  * the next rbio on the list is run and the IO is started
666  * automatically.  1 is returned.
667  *
668  * If we return 0, the caller still owns the rbio and must continue
669  * with the IO submission.  If we return 1, the caller must assume
670  * the rbio has been freed or merged into another rbio.
671  */
672 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
673 {
674 int bucket = rbio_bucket(rbio);
675 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
676 struct btrfs_raid_bio *cur;
677 struct btrfs_raid_bio *pending;
678 unsigned long flags;
679 struct btrfs_raid_bio *freeit = NULL;
680 struct btrfs_raid_bio *cache_drop = NULL;
681 int ret = 0;
682
683 spin_lock_irqsave(&h->lock, flags);
684 list_for_each_entry(cur, &h->hash_list, hash_list) {
685 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
686 spin_lock(&cur->bio_list_lock);
687
688
689 if (bio_list_empty(&cur->bio_list) &&
690 list_empty(&cur->plug_list) &&
691 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
692 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
693 list_del_init(&cur->hash_list);
694 refcount_dec(&cur->refs);
695
696 steal_rbio(cur, rbio);
697 cache_drop = cur;
698 spin_unlock(&cur->bio_list_lock);
699
700 goto lockit;
701 }
702
703
704 if (rbio_can_merge(cur, rbio)) {
705 merge_rbio(cur, rbio);
706 spin_unlock(&cur->bio_list_lock);
707 freeit = rbio;
708 ret = 1;
709 goto out;
710 }
711
712
713
714
715
716
717
718
719
720
721 list_for_each_entry(pending, &cur->plug_list,
722 plug_list) {
723 if (rbio_can_merge(pending, rbio)) {
724 merge_rbio(pending, rbio);
725 spin_unlock(&cur->bio_list_lock);
726 freeit = rbio;
727 ret = 1;
728 goto out;
729 }
730 }
731
732
733
734
735
736 list_add_tail(&rbio->plug_list, &cur->plug_list);
737 spin_unlock(&cur->bio_list_lock);
738 ret = 1;
739 goto out;
740 }
741 }
742 lockit:
743 refcount_inc(&rbio->refs);
744 list_add(&rbio->hash_list, &h->hash_list);
745 out:
746 spin_unlock_irqrestore(&h->lock, flags);
747 if (cache_drop)
748 remove_rbio_from_cache(cache_drop);
749 if (freeit)
750 __free_raid_bio(freeit);
751 return ret;
752 }
753
754
755 /* called as rmw or parity rebuild is completed.  If the plug
756  * list has more rbios waiting for this stripe, start the next one
757  */
758 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
759 {
760 int bucket;
761 struct btrfs_stripe_hash *h;
762 unsigned long flags;
763 int keep_cache = 0;
764
765 bucket = rbio_bucket(rbio);
766 h = rbio->fs_info->stripe_hash_table->table + bucket;
767
768 if (list_empty(&rbio->plug_list))
769 cache_rbio(rbio);
770
771 spin_lock_irqsave(&h->lock, flags);
772 spin_lock(&rbio->bio_list_lock);
773
774 if (!list_empty(&rbio->hash_list)) {
775
776
777
778
779
780 if (list_empty(&rbio->plug_list) &&
781 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
782 keep_cache = 1;
783 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
784 BUG_ON(!bio_list_empty(&rbio->bio_list));
785 goto done;
786 }
787
788 list_del_init(&rbio->hash_list);
789 refcount_dec(&rbio->refs);
790
791
792
793
794
795
796 if (!list_empty(&rbio->plug_list)) {
797 struct btrfs_raid_bio *next;
798 struct list_head *head = rbio->plug_list.next;
799
800 next = list_entry(head, struct btrfs_raid_bio,
801 plug_list);
802
803 list_del_init(&rbio->plug_list);
804
805 list_add(&next->hash_list, &h->hash_list);
806 refcount_inc(&next->refs);
807 spin_unlock(&rbio->bio_list_lock);
808 spin_unlock_irqrestore(&h->lock, flags);
809
810 if (next->operation == BTRFS_RBIO_READ_REBUILD)
811 start_async_work(next, read_rebuild_work);
812 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
813 steal_rbio(rbio, next);
814 start_async_work(next, read_rebuild_work);
815 } else if (next->operation == BTRFS_RBIO_WRITE) {
816 steal_rbio(rbio, next);
817 start_async_work(next, rmw_work);
818 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
819 steal_rbio(rbio, next);
820 start_async_work(next, scrub_parity_work);
821 }
822
823 goto done_nolock;
824 }
825 }
826 done:
827 spin_unlock(&rbio->bio_list_lock);
828 spin_unlock_irqrestore(&h->lock, flags);
829
830 done_nolock:
831 if (!keep_cache)
832 remove_rbio_from_cache(rbio);
833 }
834
835 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
836 {
837 int i;
838
839 if (!refcount_dec_and_test(&rbio->refs))
840 return;
841
842 WARN_ON(!list_empty(&rbio->stripe_cache));
843 WARN_ON(!list_empty(&rbio->hash_list));
844 WARN_ON(!bio_list_empty(&rbio->bio_list));
845
846 for (i = 0; i < rbio->nr_pages; i++) {
847 if (rbio->stripe_pages[i]) {
848 __free_page(rbio->stripe_pages[i]);
849 rbio->stripe_pages[i] = NULL;
850 }
851 }
852
853 btrfs_put_bbio(rbio->bbio);
854 kfree(rbio);
855 }
856
857 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
858 {
859 struct bio *next;
860
861 while (cur) {
862 next = cur->bi_next;
863 cur->bi_next = NULL;
864 cur->bi_status = err;
865 bio_endio(cur);
866 cur = next;
867 }
868 }
869
870 /* this frees the rbio and runs through all the bios in the
871  * bio_list and calls end_io on them
872  */
873
874 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
875 {
876 struct bio *cur = bio_list_get(&rbio->bio_list);
877 struct bio *extra;
878
879 if (rbio->generic_bio_cnt)
880 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
881
882 /*
883  * At this moment, rbio->bio_list is empty, however since rbio does
884  * not always have RBIO_RMW_LOCKED_BIT set and rbio is still linked
885  * on the hash list, rbio may be merged with others so that
886  * rbio->bio_list can be non-empty.
887  * Once unlock_stripe() is done, rbio->bio_list will not be updated
888  * any more and we can call bio_endio() on all queued bios.
889  */
890 unlock_stripe(rbio);
891 extra = bio_list_get(&rbio->bio_list);
892 __free_raid_bio(rbio);
893
894 rbio_endio_bio_list(cur, err);
895 if (extra)
896 rbio_endio_bio_list(extra, err);
897 }
898
899
900 /* end io function used by finish_rmw.  When we finally
901  * get here, we've written a full stripe
902  */
903 static void raid_write_end_io(struct bio *bio)
904 {
905 struct btrfs_raid_bio *rbio = bio->bi_private;
906 blk_status_t err = bio->bi_status;
907 int max_errors;
908
909 if (err)
910 fail_bio_stripe(rbio, bio);
911
912 bio_put(bio);
913
914 if (!atomic_dec_and_test(&rbio->stripes_pending))
915 return;
916
917 err = BLK_STS_OK;
918
919
920 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
921 0 : rbio->bbio->max_errors;
922 if (atomic_read(&rbio->error) > max_errors)
923 err = BLK_STS_IOERR;
924
925 rbio_orig_end_io(rbio, err);
926 }
927
928 /*
929  * the read/modify/write code wants to use the original bio for
930  * any pages it included, and then use the rbio for everything
931  * else.  This function decides if a given index (stripe number)
932  * and page number in that stripe fall inside the original bio
933  * or the rbio.
934  *
935  * if you set bio_list_only, you'll get a NULL back for any ranges
936  * that are outside the bio_list
937  *
938  * This doesn't take any refs on anything, you get a bare page
939  * pointer and the caller must bump refs as required.
940  *
941  * You must call index_rbio_pages once before you can trust
942  * the answers from this function.
943  */
944 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
945 int index, int pagenr, int bio_list_only)
946 {
947 int chunk_page;
948 struct page *p = NULL;
949
950 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
951
952 spin_lock_irq(&rbio->bio_list_lock);
953 p = rbio->bio_pages[chunk_page];
954 spin_unlock_irq(&rbio->bio_list_lock);
955
956 if (p || bio_list_only)
957 return p;
958
959 return rbio->stripe_pages[chunk_page];
960 }
961
962
963 /* number of pages we need for the entire stripe across all the
964  * drives
965  */
966 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
967 {
968 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
969 }
970
971
972 /* allocation and initial setup for the btrfs_raid_bio.  Note that
973  * this does not allocate any pages for rbio->stripe_pages.
974  */
975 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
976 struct btrfs_bio *bbio,
977 u64 stripe_len)
978 {
979 struct btrfs_raid_bio *rbio;
980 int nr_data = 0;
981 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
982 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
983 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
984 void *p;
985
986 rbio = kzalloc(sizeof(*rbio) +
987 sizeof(*rbio->stripe_pages) * num_pages +
988 sizeof(*rbio->bio_pages) * num_pages +
989 sizeof(*rbio->finish_pointers) * real_stripes +
990 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
991 sizeof(*rbio->finish_pbitmap) *
992 BITS_TO_LONGS(stripe_npages),
993 GFP_NOFS);
994 if (!rbio)
995 return ERR_PTR(-ENOMEM);
996
997 bio_list_init(&rbio->bio_list);
998 INIT_LIST_HEAD(&rbio->plug_list);
999 spin_lock_init(&rbio->bio_list_lock);
1000 INIT_LIST_HEAD(&rbio->stripe_cache);
1001 INIT_LIST_HEAD(&rbio->hash_list);
1002 rbio->bbio = bbio;
1003 rbio->fs_info = fs_info;
1004 rbio->stripe_len = stripe_len;
1005 rbio->nr_pages = num_pages;
1006 rbio->real_stripes = real_stripes;
1007 rbio->stripe_npages = stripe_npages;
1008 rbio->faila = -1;
1009 rbio->failb = -1;
1010 refcount_set(&rbio->refs, 1);
1011 atomic_set(&rbio->error, 0);
1012 atomic_set(&rbio->stripes_pending, 0);
1013
1014 /*
1015  * the stripe_pages, bio_pages, etc arrays point to the extra
1016  * memory we allocated past the end of the rbio
1017  */
1018 p = rbio + 1;
1019 #define CONSUME_ALLOC(ptr, count) do { \
1020 ptr = p; \
1021 p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
1022 } while (0)
1023 CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1024 CONSUME_ALLOC(rbio->bio_pages, num_pages);
1025 CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1026 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1027 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1028 #undef CONSUME_ALLOC
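/*
 * CONSUME_ALLOC above hands out slices of the single kzalloc() made at
 * the top of this function, so one allocation (and the matching kfree()
 * in __free_raid_bio) covers the rbio and all five trailing arrays.
 */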
1029
1030 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1031 nr_data = real_stripes - 1;
1032 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1033 nr_data = real_stripes - 2;
1034 else
1035 BUG();
1036
1037 rbio->nr_data = nr_data;
1038 return rbio;
1039 }
1040
1041 /* allocate pages for all the stripes in the bio, including parity */
1042 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1043 {
1044 int i;
1045 struct page *page;
1046
1047 for (i = 0; i < rbio->nr_pages; i++) {
1048 if (rbio->stripe_pages[i])
1049 continue;
1050 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1051 if (!page)
1052 return -ENOMEM;
1053 rbio->stripe_pages[i] = page;
1054 }
1055 return 0;
1056 }
1057
1058 /* only allocate pages for p/q stripes */
1059 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1060 {
1061 int i;
1062 struct page *page;
1063
1064 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1065
1066 for (; i < rbio->nr_pages; i++) {
1067 if (rbio->stripe_pages[i])
1068 continue;
1069 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1070 if (!page)
1071 return -ENOMEM;
1072 rbio->stripe_pages[i] = page;
1073 }
1074 return 0;
1075 }
1076
1077
1078 /* add a single page from a specific stripe into our list of bios
1079  * for IO.  This will try to merge into existing bios if possible,
1080  * and returns zero if all went well.
1081  */
1082 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1083 struct bio_list *bio_list,
1084 struct page *page,
1085 int stripe_nr,
1086 unsigned long page_index,
1087 unsigned long bio_max_len)
1088 {
1089 struct bio *last = bio_list->tail;
1090 u64 last_end = 0;
1091 int ret;
1092 struct bio *bio;
1093 struct btrfs_bio_stripe *stripe;
1094 u64 disk_start;
1095
1096 stripe = &rbio->bbio->stripes[stripe_nr];
1097 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1098
1099
1100 if (!stripe->dev->bdev)
1101 return fail_rbio_index(rbio, stripe_nr);
1102
1103
1104 if (last) {
1105 last_end = (u64)last->bi_iter.bi_sector << 9;
1106 last_end += last->bi_iter.bi_size;
1107
1108
1109
1110
1111
1112 if (last_end == disk_start && stripe->dev->bdev &&
1113 !last->bi_status &&
1114 last->bi_disk == stripe->dev->bdev->bd_disk &&
1115 last->bi_partno == stripe->dev->bdev->bd_partno) {
1116 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1117 if (ret == PAGE_SIZE)
1118 return 0;
1119 }
1120 }
1121
1122
1123 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1124 bio->bi_iter.bi_size = 0;
1125 bio_set_dev(bio, stripe->dev->bdev);
1126 bio->bi_iter.bi_sector = disk_start >> 9;
1127
1128 bio_add_page(bio, page, PAGE_SIZE, 0);
1129 bio_list_add(bio_list, bio);
1130 return 0;
1131 }
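/*
 * The merge attempt above matters for performance: a full stripe
 * touches stripe_len bytes on every device, and without bio_add_page()
 * coalescing we would submit one bio per page instead of one large bio
 * per device.
 */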
1132
1133
1134 /*
1135  * while we're doing the read/modify/write cycle, we could have
1136  * errors in reading pages off the disk.  This checks for errors,
1137  * and if we're not able to read a page it triggers parity
1138  * reconstruction before the rmw is finished
1139  */
1140 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1141 {
1142 if (rbio->faila >= 0 || rbio->failb >= 0) {
1143 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1144 __raid56_parity_recover(rbio);
1145 } else {
1146 finish_rmw(rbio);
1147 }
1148 }
1149
1150
1151 /*
1152  * helper function to walk our bio list and populate the bio_pages
1153  * array with the result.  This seems expensive, but it is faster
1154  * than constantly searching through the bio list as we setup the
1155  * IO in finish_rmw or stripe reconstruction.
1156  * This must be called before you trust the answers from page_in_rbio
1157  */
1158 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1159 {
1160 struct bio *bio;
1161 u64 start;
1162 unsigned long stripe_offset;
1163 unsigned long page_index;
1164
1165 spin_lock_irq(&rbio->bio_list_lock);
1166 bio_list_for_each(bio, &rbio->bio_list) {
1167 struct bio_vec bvec;
1168 struct bvec_iter iter;
1169 int i = 0;
1170
1171 start = (u64)bio->bi_iter.bi_sector << 9;
1172 stripe_offset = start - rbio->bbio->raid_map[0];
1173 page_index = stripe_offset >> PAGE_SHIFT;
1174
1175 if (bio_flagged(bio, BIO_CLONED))
1176 bio->bi_iter = btrfs_io_bio(bio)->iter;
1177
1178 bio_for_each_segment(bvec, bio, iter) {
1179 rbio->bio_pages[page_index + i] = bvec.bv_page;
1180 i++;
1181 }
1182 }
1183 spin_unlock_irq(&rbio->bio_list_lock);
1184 }
1185
1186
1187 /*
1188  * this is called from one of two situations.  We either have a full
1189  * stripe from the higher layers, or we've read all the missing bits
1190  * off disk.
1191  *
1192  * This will calculate the parity and then send down any changed blocks.
1193  */
1194 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1195 {
1196 struct btrfs_bio *bbio = rbio->bbio;
1197 void **pointers = rbio->finish_pointers;
1198 int nr_data = rbio->nr_data;
1199 int stripe;
1200 int pagenr;
1201 int p_stripe = -1;
1202 int q_stripe = -1;
1203 struct bio_list bio_list;
1204 struct bio *bio;
1205 int ret;
1206
1207 bio_list_init(&bio_list);
1208
1209 if (rbio->real_stripes - rbio->nr_data == 1) {
1210 p_stripe = rbio->real_stripes - 1;
1211 } else if (rbio->real_stripes - rbio->nr_data == 2) {
1212 p_stripe = rbio->real_stripes - 2;
1213 q_stripe = rbio->real_stripes - 1;
1214 } else {
1215 BUG();
1216 }
1217
1218 /* at this point we either have a full stripe, or we've read the
1219  * full stripe from the drive.  recalculate the parity and write
1220  * the new results.
1221  *
1222  * We're not allowed to add any new bios to the bio list here,
1223  * anyone else that wants to change this stripe needs to do their
1224  * own rmw.
1225  */
1226 spin_lock_irq(&rbio->bio_list_lock);
1227 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1228 spin_unlock_irq(&rbio->bio_list_lock);
1229
1230 atomic_set(&rbio->error, 0);
1231
1232 /*
1233  * now that we've set rmw_locked, run through the bio list one
1234  * last time and map the page pointers
1235  *
1236  * We don't cache full rbios because we're assuming the higher
1237  * layers are unlikely to use this area of the disk again soon.
1238  * If they do use it again, hopefully they will send another full
1239  * bio.
1240  */
1241 index_rbio_pages(rbio);
1242 if (!rbio_is_full(rbio))
1243 cache_rbio_pages(rbio);
1244 else
1245 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1246
1247 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1248 struct page *p;
1249
1250 for (stripe = 0; stripe < nr_data; stripe++) {
1251 p = page_in_rbio(rbio, stripe, pagenr, 0);
1252 pointers[stripe] = kmap(p);
1253 }
1254
1255
1256 p = rbio_pstripe_page(rbio, pagenr);
1257 SetPageUptodate(p);
1258 pointers[stripe++] = kmap(p);
1259
1260 if (q_stripe != -1) {
1261
1262
1263
1264
1265
1266 p = rbio_qstripe_page(rbio, pagenr);
1267 SetPageUptodate(p);
1268 pointers[stripe++] = kmap(p);
1269
1270 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1271 pointers);
1272 } else {
1273
1274 copy_page(pointers[nr_data], pointers[0]);
1275 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1276 }
1277
1278
1279 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1280 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1281 }
1282
1283 /*
1284  * time to start writing.  Make bios for everything from the higher
1285  * layers (the bio_list in our rbio) and our p/q.  Ignore everything
1286  * else.
1287  */
1288 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1289 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1290 struct page *page;
1291 if (stripe < rbio->nr_data) {
1292 page = page_in_rbio(rbio, stripe, pagenr, 1);
1293 if (!page)
1294 continue;
1295 } else {
1296 page = rbio_stripe_page(rbio, stripe, pagenr);
1297 }
1298
1299 ret = rbio_add_io_page(rbio, &bio_list,
1300 page, stripe, pagenr, rbio->stripe_len);
1301 if (ret)
1302 goto cleanup;
1303 }
1304 }
1305
1306 if (likely(!bbio->num_tgtdevs))
1307 goto write_data;
1308
1309 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1310 if (!bbio->tgtdev_map[stripe])
1311 continue;
1312
1313 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1314 struct page *page;
1315 if (stripe < rbio->nr_data) {
1316 page = page_in_rbio(rbio, stripe, pagenr, 1);
1317 if (!page)
1318 continue;
1319 } else {
1320 page = rbio_stripe_page(rbio, stripe, pagenr);
1321 }
1322
1323 ret = rbio_add_io_page(rbio, &bio_list, page,
1324 rbio->bbio->tgtdev_map[stripe],
1325 pagenr, rbio->stripe_len);
1326 if (ret)
1327 goto cleanup;
1328 }
1329 }
1330
1331 write_data:
1332 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1333 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1334
1335 while (1) {
1336 bio = bio_list_pop(&bio_list);
1337 if (!bio)
1338 break;
1339
1340 bio->bi_private = rbio;
1341 bio->bi_end_io = raid_write_end_io;
1342 bio->bi_opf = REQ_OP_WRITE;
1343
1344 submit_bio(bio);
1345 }
1346 return;
1347
1348 cleanup:
1349 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1350
1351 while ((bio = bio_list_pop(&bio_list)))
1352 bio_put(bio);
1353 }
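/*
 * finish_rmw() computes parity from whatever is in the page arrays at
 * this point; it is the read phase (raid56_rmw_stripe) or a full
 * stripe worth of bios that guarantees those pages were populated
 * beforehand.
 */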
1354
1355
1356 /* helper to find the stripe number for a given bio.  Used to figure
1357  * out which stripe has failed.  This expects the bio to correspond
1358  * to a physical disk, so it looks up based on physical sector numbers
1359  */
1360 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1361 struct bio *bio)
1362 {
1363 u64 physical = bio->bi_iter.bi_sector;
1364 u64 stripe_start;
1365 int i;
1366 struct btrfs_bio_stripe *stripe;
1367
1368 physical <<= 9;
1369
1370 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1371 stripe = &rbio->bbio->stripes[i];
1372 stripe_start = stripe->physical;
1373 if (physical >= stripe_start &&
1374 physical < stripe_start + rbio->stripe_len &&
1375 stripe->dev->bdev &&
1376 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1377 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1378 return i;
1379 }
1380 }
1381 return -1;
1382 }
1383
1384
1385 /* helper to find the stripe number for a given bio (before mapping).
1386  * Used to figure out which stripe has failed.  This looks up based
1387  * on logical block numbers.
1388  */
1389 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1390 struct bio *bio)
1391 {
1392 u64 logical = bio->bi_iter.bi_sector;
1393 u64 stripe_start;
1394 int i;
1395
1396 logical <<= 9;
1397
1398 for (i = 0; i < rbio->nr_data; i++) {
1399 stripe_start = rbio->bbio->raid_map[i];
1400 if (logical >= stripe_start &&
1401 logical < stripe_start + rbio->stripe_len) {
1402 return i;
1403 }
1404 }
1405 return -1;
1406 }
1407
1408
1409
1410 /* returns -EIO if we had too many failures */
1411 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1412 {
1413 unsigned long flags;
1414 int ret = 0;
1415
1416 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1417
1418
1419 if (rbio->faila == failed || rbio->failb == failed)
1420 goto out;
1421
1422 if (rbio->faila == -1) {
1423
1424 rbio->faila = failed;
1425 atomic_inc(&rbio->error);
1426 } else if (rbio->failb == -1) {
1427
1428 rbio->failb = failed;
1429 atomic_inc(&rbio->error);
1430 } else {
1431 ret = -EIO;
1432 }
1433 out:
1434 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1435
1436 return ret;
1437 }
1438
1439
1440
1441
1442 /* helper to fail a stripe based on a physical disk bio */
1443 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1444 struct bio *bio)
1445 {
1446 int failed = find_bio_stripe(rbio, bio);
1447
1448 if (failed < 0)
1449 return -EIO;
1450
1451 return fail_rbio_index(rbio, failed);
1452 }
1453
1454
1455 /* this sets each page in the bio uptodate.  It should only be used
1456  * on private rbio pages, nothing that comes in from the higher layers
1457  */
1458 static void set_bio_pages_uptodate(struct bio *bio)
1459 {
1460 struct bio_vec *bvec;
1461 struct bvec_iter_all iter_all;
1462
1463 ASSERT(!bio_flagged(bio, BIO_CLONED));
1464
1465 bio_for_each_segment_all(bvec, bio, iter_all)
1466 SetPageUptodate(bvec->bv_page);
1467 }
1468
1469
1470 /*
1471  * end io for the read phase of the rmw cycle.  All the bios here
1472  * are physical stripe bios we've read from the disk so we can
1473  * recalculate the parity of the stripe.  This will usually kick
1474  * off finish_rmw once all the bios are read in, but it may trigger
1475  * parity reconstruction if we had any errors along the way
1476  */
1477 static void raid_rmw_end_io(struct bio *bio)
1478 {
1479 struct btrfs_raid_bio *rbio = bio->bi_private;
1480
1481 if (bio->bi_status)
1482 fail_bio_stripe(rbio, bio);
1483 else
1484 set_bio_pages_uptodate(bio);
1485
1486 bio_put(bio);
1487
1488 if (!atomic_dec_and_test(&rbio->stripes_pending))
1489 return;
1490
1491 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1492 goto cleanup;
1493
1494 /*
1495  * this will normally call finish_rmw to start our write, but if
1496  * there are any failed stripes we'll reconstruct from parity
1497  * first
1498  */
1499 validate_rbio_for_rmw(rbio);
1500 return;
1501
1502 cleanup:
1503
1504 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1505 }
1506
1507
1508 /* the stripe must be locked by the caller.  It will unlock after
1509  * all the writes are done
1510  */
1511 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1512 {
1513 int bios_to_read = 0;
1514 struct bio_list bio_list;
1515 int ret;
1516 int pagenr;
1517 int stripe;
1518 struct bio *bio;
1519
1520 bio_list_init(&bio_list);
1521
1522 ret = alloc_rbio_pages(rbio);
1523 if (ret)
1524 goto cleanup;
1525
1526 index_rbio_pages(rbio);
1527
1528 atomic_set(&rbio->error, 0);
1529
1530
1531
1532
1533 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1534 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1535 struct page *page;
1536
1537
1538
1539
1540
1541
1542 page = page_in_rbio(rbio, stripe, pagenr, 1);
1543 if (page)
1544 continue;
1545
1546 page = rbio_stripe_page(rbio, stripe, pagenr);
1547
1548
1549
1550
1551 if (PageUptodate(page))
1552 continue;
1553
1554 ret = rbio_add_io_page(rbio, &bio_list, page,
1555 stripe, pagenr, rbio->stripe_len);
1556 if (ret)
1557 goto cleanup;
1558 }
1559 }
1560
1561 bios_to_read = bio_list_size(&bio_list);
1562 if (!bios_to_read) {
1563
1564
1565
1566
1567
1568
1569 goto finish;
1570 }
1571
1572 /*
1573  * the bbio may be freed once we submit the last bio.  Make sure
1574  * not to touch it after that
1575  */
1576 atomic_set(&rbio->stripes_pending, bios_to_read);
1577 while (1) {
1578 bio = bio_list_pop(&bio_list);
1579 if (!bio)
1580 break;
1581
1582 bio->bi_private = rbio;
1583 bio->bi_end_io = raid_rmw_end_io;
1584 bio->bi_opf = REQ_OP_READ;
1585
1586 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1587
1588 submit_bio(bio);
1589 }
1590
1591 return 0;
1592
1593 cleanup:
1594 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1595
1596 while ((bio = bio_list_pop(&bio_list)))
1597 bio_put(bio);
1598
1599 return -EIO;
1600
1601 finish:
1602 validate_rbio_for_rmw(rbio);
1603 return 0;
1604 }
1605
1606
1607 /* if the upper layers pass in a full stripe, we thank them by
1608  * only allocating enough pages to hold the parity
1609  */
1610 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1611 {
1612 int ret;
1613
1614 ret = alloc_rbio_parity_pages(rbio);
1615 if (ret) {
1616 __free_raid_bio(rbio);
1617 return ret;
1618 }
1619
1620 ret = lock_stripe_add(rbio);
1621 if (ret == 0)
1622 finish_rmw(rbio);
1623 return 0;
1624 }
1625
1626
1627 /* partial stripe writes get handed over to async helpers.  We're
1628  * really hoping to merge a few more writes into this rbio before
1629  * calculating new parity
1630  */
1631 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1632 {
1633 int ret;
1634
1635 ret = lock_stripe_add(rbio);
1636 if (ret == 0)
1637 start_async_work(rbio, rmw_work);
1638 return 0;
1639 }
1640
1641
1642 /*
1643  * sometimes while we were reading from the drive to recalculate
1644  * parity, enough new bios come in to create a full stripe.  So we
1645  * do a check here to see if we can go directly to finish_rmw
1646  */
1647 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1648 {
1649 /* head off into rmw land if we don't have a full stripe */
1650 if (!rbio_is_full(rbio))
1651 return partial_stripe_write(rbio);
1652 return full_stripe_write(rbio);
1653 }
1654
1655
1656 /*
1657  * We use plugging call backs to collect full stripes.  Any time we
1658  * get a partial stripe write while plugged we collect it into a
1659  * list.  When the unplug comes down, we sort the list by logical
1660  * block number and merge everything we can into the same rbios
1661  */
1662 struct btrfs_plug_cb {
1663 struct blk_plug_cb cb;
1664 struct btrfs_fs_info *info;
1665 struct list_head rbio_list;
1666 struct btrfs_work work;
1667 };
1668
1669
1670
1671 /* rbios on the plug list are sorted for easier merging */
1672 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1673 {
1674 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1675 plug_list);
1676 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1677 plug_list);
1678 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1679 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1680
1681 if (a_sector < b_sector)
1682 return -1;
1683 if (a_sector > b_sector)
1684 return 1;
1685 return 0;
1686 }
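/*
 * Sorting by the first logical sector of each rbio's first bio puts
 * adjacent writes next to each other in the plug list, which gives
 * rbio_can_merge() in run_plug() the best chance of combining them.
 */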
1687
1688 static void run_plug(struct btrfs_plug_cb *plug)
1689 {
1690 struct btrfs_raid_bio *cur;
1691 struct btrfs_raid_bio *last = NULL;
1692
1693
1694
1695
1696
1697
1698 list_sort(NULL, &plug->rbio_list, plug_cmp);
1699 while (!list_empty(&plug->rbio_list)) {
1700 cur = list_entry(plug->rbio_list.next,
1701 struct btrfs_raid_bio, plug_list);
1702 list_del_init(&cur->plug_list);
1703
1704 if (rbio_is_full(cur)) {
1705 int ret;
1706
1707
1708 ret = full_stripe_write(cur);
1709 BUG_ON(ret);
1710 continue;
1711 }
1712 if (last) {
1713 if (rbio_can_merge(last, cur)) {
1714 merge_rbio(last, cur);
1715 __free_raid_bio(cur);
1716 continue;
1717
1718 }
1719 __raid56_parity_write(last);
1720 }
1721 last = cur;
1722 }
1723 if (last) {
1724 __raid56_parity_write(last);
1725 }
1726 kfree(plug);
1727 }
1728
1729
1730 /* if the unplug comes from schedule, we have to push the work off
1731  * to a helper thread
1732  */
1733 static void unplug_work(struct btrfs_work *work)
1734 {
1735 struct btrfs_plug_cb *plug;
1736 plug = container_of(work, struct btrfs_plug_cb, work);
1737 run_plug(plug);
1738 }
1739
1740 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1741 {
1742 struct btrfs_plug_cb *plug;
1743 plug = container_of(cb, struct btrfs_plug_cb, cb);
1744
1745 if (from_schedule) {
1746 btrfs_init_work(&plug->work, unplug_work, NULL, NULL);
1747 btrfs_queue_work(plug->info->rmw_workers,
1748 &plug->work);
1749 return;
1750 }
1751 run_plug(plug);
1752 }
1753
1754
1755
1756 /* our main entry point for writes from the rest of the FS */
1757 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1758 struct btrfs_bio *bbio, u64 stripe_len)
1759 {
1760 struct btrfs_raid_bio *rbio;
1761 struct btrfs_plug_cb *plug = NULL;
1762 struct blk_plug_cb *cb;
1763 int ret;
1764
1765 rbio = alloc_rbio(fs_info, bbio, stripe_len);
1766 if (IS_ERR(rbio)) {
1767 btrfs_put_bbio(bbio);
1768 return PTR_ERR(rbio);
1769 }
1770 bio_list_add(&rbio->bio_list, bio);
1771 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1772 rbio->operation = BTRFS_RBIO_WRITE;
1773
1774 btrfs_bio_counter_inc_noblocked(fs_info);
1775 rbio->generic_bio_cnt = 1;
1776
1777 /*
1778  * don't plug on full rbios, just get them out the door
1779  * as quickly as we can
1780  */
1781 if (rbio_is_full(rbio)) {
1782 ret = full_stripe_write(rbio);
1783 if (ret)
1784 btrfs_bio_counter_dec(fs_info);
1785 return ret;
1786 }
1787
1788 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1789 if (cb) {
1790 plug = container_of(cb, struct btrfs_plug_cb, cb);
1791 if (!plug->info) {
1792 plug->info = fs_info;
1793 INIT_LIST_HEAD(&plug->rbio_list);
1794 }
1795 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1796 ret = 0;
1797 } else {
1798 ret = __raid56_parity_write(rbio);
1799 if (ret)
1800 btrfs_bio_counter_dec(fs_info);
1801 }
1802 return ret;
1803 }
1804
1805
1806 /* all parity reconstruction happens here.  We've read in everything
1807  * we can find from the drives and this does the heavy lifting of
1808  * sorting the good from the bad.
1809  */
1810 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1811 {
1812 int pagenr, stripe;
1813 void **pointers;
1814 int faila = -1, failb = -1;
1815 struct page *page;
1816 blk_status_t err;
1817 int i;
1818
1819 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1820 if (!pointers) {
1821 err = BLK_STS_RESOURCE;
1822 goto cleanup_io;
1823 }
1824
1825 faila = rbio->faila;
1826 failb = rbio->failb;
1827
1828 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1829 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1830 spin_lock_irq(&rbio->bio_list_lock);
1831 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1832 spin_unlock_irq(&rbio->bio_list_lock);
1833 }
1834
1835 index_rbio_pages(rbio);
1836
1837 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1838
1839
1840
1841
1842 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1843 !test_bit(pagenr, rbio->dbitmap))
1844 continue;
1845
1846
1847
1848
1849 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1850
1851
1852
1853
1854 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1855 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1856 (stripe == faila || stripe == failb)) {
1857 page = page_in_rbio(rbio, stripe, pagenr, 0);
1858 } else {
1859 page = rbio_stripe_page(rbio, stripe, pagenr);
1860 }
1861 pointers[stripe] = kmap(page);
1862 }
1863
1864
1865 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1866
1867
1868
1869
1870 if (failb < 0) {
1871 if (faila == rbio->nr_data) {
1872
1873
1874
1875
1876
1877 err = BLK_STS_IOERR;
1878 goto cleanup;
1879 }
1880
1881
1882
1883
1884 goto pstripe;
1885 }
1886
1887
1888 if (faila > failb) {
1889 int tmp = failb;
1890 failb = faila;
1891 faila = tmp;
1892 }
1893
1894
1895
1896
1897
1898
1899
1900 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1901 if (rbio->bbio->raid_map[faila] ==
1902 RAID5_P_STRIPE) {
1903 err = BLK_STS_IOERR;
1904 goto cleanup;
1905 }
1906
1907
1908
1909
1910 goto pstripe;
1911 }
1912
1913 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1914 raid6_datap_recov(rbio->real_stripes,
1915 PAGE_SIZE, faila, pointers);
1916 } else {
1917 raid6_2data_recov(rbio->real_stripes,
1918 PAGE_SIZE, faila, failb,
1919 pointers);
1920 }
1921 } else {
1922 void *p;
1923
1924
1925 BUG_ON(failb != -1);
1926 pstripe:
1927
1928 copy_page(pointers[faila], pointers[rbio->nr_data]);
1929
1930
1931 p = pointers[faila];
1932 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1933 pointers[stripe] = pointers[stripe + 1];
1934 pointers[rbio->nr_data - 1] = p;
1935
1936
1937 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1938 }
1939
1940
1941
1942
1943
1944
1945 if (rbio->operation == BTRFS_RBIO_WRITE) {
1946 for (i = 0; i < rbio->stripe_npages; i++) {
1947 if (faila != -1) {
1948 page = rbio_stripe_page(rbio, faila, i);
1949 SetPageUptodate(page);
1950 }
1951 if (failb != -1) {
1952 page = rbio_stripe_page(rbio, failb, i);
1953 SetPageUptodate(page);
1954 }
1955 }
1956 }
1957 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1958
1959
1960
1961
1962 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1963 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1964 (stripe == faila || stripe == failb)) {
1965 page = page_in_rbio(rbio, stripe, pagenr, 0);
1966 } else {
1967 page = rbio_stripe_page(rbio, stripe, pagenr);
1968 }
1969 kunmap(page);
1970 }
1971 }
1972
1973 err = BLK_STS_OK;
1974 cleanup:
1975 kfree(pointers);
1976
1977 cleanup_io:
1978
1979 /* Similar to READ_REBUILD, REBUILD_MISSING at this point also has
1980  * a valid rbio which is consistent with ondisk content, thus such
1981  * a valid rbio can be cached to avoid further disk reads
1982  */
1983 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1984 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1985 /*
1986  * - In case of two failures, where rbio->failb != -1:
1987  *
1988  *   Do not cache this rbio since the above read reconstruction
1989  *   (raid6_datap_recov() or raid6_2data_recov()) may have
1990  *   changed some content of stripes which are not identical to
1991  *   on-disk content any more, otherwise, a later write/recover
1992  *   may steal stripe_pages from this rbio.
1993  *
1994  * - In case of single failure, where rbio->failb == -1:
1995  *
1996  *   Cache this rbio iff the above read reconstruction is
1997  *   executed without problems.
1998  */
1999
2000 if (err == BLK_STS_OK && rbio->failb < 0)
2001 cache_rbio_pages(rbio);
2002 else
2003 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2004
2005 rbio_orig_end_io(rbio, err);
2006 } else if (err == BLK_STS_OK) {
2007 rbio->faila = -1;
2008 rbio->failb = -1;
2009
2010 if (rbio->operation == BTRFS_RBIO_WRITE)
2011 finish_rmw(rbio);
2012 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2013 finish_parity_scrub(rbio, 0);
2014 else
2015 BUG();
2016 } else {
2017 rbio_orig_end_io(rbio, err);
2018 }
2019 }
2020
2021
2022 /* This is called only for stripes we've read from disk to
2023  * reconstruct the parity.
2024  */
2025 static void raid_recover_end_io(struct bio *bio)
2026 {
2027 struct btrfs_raid_bio *rbio = bio->bi_private;
2028
2029 /*
2030  * we only read stripe pages off the disk, set them up to date if
2031  * there were no errors
2032  */
2033 if (bio->bi_status)
2034 fail_bio_stripe(rbio, bio);
2035 else
2036 set_bio_pages_uptodate(bio);
2037 bio_put(bio);
2038
2039 if (!atomic_dec_and_test(&rbio->stripes_pending))
2040 return;
2041
2042 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2043 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2044 else
2045 __raid_recover_end_io(rbio);
2046 }
2047
2048
2049 /*
2050  * reads everything we need off the disk to reconstruct the parity.
2051  * endio handlers trigger final reconstruction when the IO is done.
2052  *
2053  * This is used both for reads from the higher layers and for parity
2054  * construction required to finish a rmw cycle.
2055  */
2056 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2057 {
2058 int bios_to_read = 0;
2059 struct bio_list bio_list;
2060 int ret;
2061 int pagenr;
2062 int stripe;
2063 struct bio *bio;
2064
2065 bio_list_init(&bio_list);
2066
2067 ret = alloc_rbio_pages(rbio);
2068 if (ret)
2069 goto cleanup;
2070
2071 atomic_set(&rbio->error, 0);
2072
2073
2074
2075
2076
2077
2078 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2079 if (rbio->faila == stripe || rbio->failb == stripe) {
2080 atomic_inc(&rbio->error);
2081 continue;
2082 }
2083
2084 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2085 struct page *p;
2086
2087
2088
2089
2090
2091 p = rbio_stripe_page(rbio, stripe, pagenr);
2092 if (PageUptodate(p))
2093 continue;
2094
2095 ret = rbio_add_io_page(rbio, &bio_list,
2096 rbio_stripe_page(rbio, stripe, pagenr),
2097 stripe, pagenr, rbio->stripe_len);
2098 if (ret < 0)
2099 goto cleanup;
2100 }
2101 }
2102
2103 bios_to_read = bio_list_size(&bio_list);
2104 if (!bios_to_read) {
2105
2106
2107
2108
2109
2110 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2111 __raid_recover_end_io(rbio);
2112 goto out;
2113 } else {
2114 goto cleanup;
2115 }
2116 }
2117
2118
2119
2120
2121
2122 atomic_set(&rbio->stripes_pending, bios_to_read);
2123 while (1) {
2124 bio = bio_list_pop(&bio_list);
2125 if (!bio)
2126 break;
2127
2128 bio->bi_private = rbio;
2129 bio->bi_end_io = raid_recover_end_io;
2130 bio->bi_opf = REQ_OP_READ;
2131
2132 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2133
2134 submit_bio(bio);
2135 }
2136 out:
2137 return 0;
2138
2139 cleanup:
2140 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2141 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2142 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2143
2144 while ((bio = bio_list_pop(&bio_list)))
2145 bio_put(bio);
2146
2147 return -EIO;
2148 }
2149
2150
2151 /* the main entry point for reads from the higher layers.  This is
2152  * really only called when the normal read path had a failure, so
2153  * we assume the bio they send down corresponds to a failed part
2154  * of the drive.
2155  */
2156 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2157 struct btrfs_bio *bbio, u64 stripe_len,
2158 int mirror_num, int generic_io)
2159 {
2160 struct btrfs_raid_bio *rbio;
2161 int ret;
2162
2163 if (generic_io) {
2164 ASSERT(bbio->mirror_num == mirror_num);
2165 btrfs_io_bio(bio)->mirror_num = mirror_num;
2166 }
2167
2168 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2169 if (IS_ERR(rbio)) {
2170 if (generic_io)
2171 btrfs_put_bbio(bbio);
2172 return PTR_ERR(rbio);
2173 }
2174
2175 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2176 bio_list_add(&rbio->bio_list, bio);
2177 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2178
2179 rbio->faila = find_logical_bio_stripe(rbio, bio);
2180 if (rbio->faila == -1) {
2181 btrfs_warn(fs_info,
2182 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2183 __func__, (u64)bio->bi_iter.bi_sector << 9,
2184 (u64)bio->bi_iter.bi_size, bbio->map_type);
2185 if (generic_io)
2186 btrfs_put_bbio(bbio);
2187 kfree(rbio);
2188 return -EIO;
2189 }
2190
2191 if (generic_io) {
2192 btrfs_bio_counter_inc_noblocked(fs_info);
2193 rbio->generic_bio_cnt = 1;
2194 } else {
2195 btrfs_get_bbio(bbio);
2196 }
2197
2198 /*
2199  * Loop retry:
2200  * for 'mirror == 2', reconstruct from all other stripes.
2201  * for 'mirror_num > 2', select a stripe to fail on every retry.
2202  */
2203 if (mirror_num > 2) {
2204 /*
2205  * 'mirror == 3' is to fail the p stripe and reconstruct from the
2206  * q stripe.  'mirror > 3' is to fail a data stripe and
2207  * reconstruct from p+q stripe.
2208  */
2209 rbio->failb = rbio->real_stripes - (mirror_num - 1);
2210 ASSERT(rbio->failb > 0);
2211 if (rbio->failb <= rbio->faila)
2212 rbio->failb--;
2213 }
2214
2215 ret = lock_stripe_add(rbio);
2216
2217 /*
2218  * __raid56_parity_recover will end the bio with any errors it
2219  * hits.  We don't want to return its error value up the stack
2220  * because our caller will end up calling bio_endio with any
2221  * nonzero return
2222  */
2223
2224 if (ret == 0)
2225 __raid56_parity_recover(rbio);
2226
2227 /*
2228  * otherwise our rbio has been added to the list of rbios that
2229  * will be handled after the current lock owner is done
2230  */
2231 return 0;
2232
2233 }
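/*
 * mirror_num semantics for the retry logic above: mirror 1 is the
 * plain read that already failed, mirror 2 rebuilds from the remaining
 * stripes, and mirror 3 and up force one extra stripe to be treated as
 * failed so raid6 can attempt the alternate reconstructions.
 */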
2234
2235 static void rmw_work(struct btrfs_work *work)
2236 {
2237 struct btrfs_raid_bio *rbio;
2238
2239 rbio = container_of(work, struct btrfs_raid_bio, work);
2240 raid56_rmw_stripe(rbio);
2241 }
2242
2243 static void read_rebuild_work(struct btrfs_work *work)
2244 {
2245 struct btrfs_raid_bio *rbio;
2246
2247 rbio = container_of(work, struct btrfs_raid_bio, work);
2248 __raid56_parity_recover(rbio);
2249 }
2250
2251
2252 /*
2253  * The following code is used to scrub/replace the parity stripe.
2254  *
2255  * Caller must have already increased bio_counter for getting @bbio.
2256  *
2257  * Note: all the pages added into the scrub/replace raid bio must be
2258  * correct and must not change during the scrub/replace; they hold
2259  * only metadata or file data with checksums.
2260  */
2261 struct btrfs_raid_bio *
2262 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2263 struct btrfs_bio *bbio, u64 stripe_len,
2264 struct btrfs_device *scrub_dev,
2265 unsigned long *dbitmap, int stripe_nsectors)
2266 {
2267 struct btrfs_raid_bio *rbio;
2268 int i;
2269
2270 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2271 if (IS_ERR(rbio))
2272 return NULL;
2273 bio_list_add(&rbio->bio_list, bio);
2274
2275
2276
2277
2278 ASSERT(!bio->bi_iter.bi_size);
2279 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2280
2281
2282
2283
2284
2285
2286 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2287 if (bbio->stripes[i].dev == scrub_dev) {
2288 rbio->scrubp = i;
2289 break;
2290 }
2291 }
2292 ASSERT(i < rbio->real_stripes);
2293
2294 /* for now we only support sectorsize == page size */
2295 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2296 ASSERT(rbio->stripe_npages == stripe_nsectors);
2297 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2298
2299
2300
2301
2302
2303 rbio->generic_bio_cnt = 1;
2304
2305 return rbio;
2306 }
2307
2308 /* Used for both parity scrub and missing. */
2309 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2310 u64 logical)
2311 {
2312 int stripe_offset;
2313 int index;
2314
2315 ASSERT(logical >= rbio->bbio->raid_map[0]);
2316 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2317 rbio->stripe_len * rbio->nr_data);
2318 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2319 index = stripe_offset >> PAGE_SHIFT;
2320 rbio->bio_pages[index] = page;
2321 }
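/*
 * The index math above mirrors rbio_stripe_page_index(): the byte
 * offset of @logical within the full stripe, divided by PAGE_SIZE,
 * selects the bio_pages slot that this scrub page occupies.
 */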
2322
2323
2324 /* We just scrub the parity that we have correct data on the same
2325  * horizontal, so we needn't allocate all pages for all the stripes.
2326  */
2327 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2328 {
2329 int i;
2330 int bit;
2331 int index;
2332 struct page *page;
2333
2334 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2335 for (i = 0; i < rbio->real_stripes; i++) {
2336 index = i * rbio->stripe_npages + bit;
2337 if (rbio->stripe_pages[index])
2338 continue;
2339
2340 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2341 if (!page)
2342 return -ENOMEM;
2343 rbio->stripe_pages[index] = page;
2344 }
2345 }
2346 return 0;
2347 }
2348
2349 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2350 int need_check)
2351 {
2352 struct btrfs_bio *bbio = rbio->bbio;
2353 void **pointers = rbio->finish_pointers;
2354 unsigned long *pbitmap = rbio->finish_pbitmap;
2355 int nr_data = rbio->nr_data;
2356 int stripe;
2357 int pagenr;
2358 int p_stripe = -1;
2359 int q_stripe = -1;
2360 struct page *p_page = NULL;
2361 struct page *q_page = NULL;
2362 struct bio_list bio_list;
2363 struct bio *bio;
2364 int is_replace = 0;
2365 int ret;
2366
2367 bio_list_init(&bio_list);
2368
2369 if (rbio->real_stripes - rbio->nr_data == 1) {
2370 p_stripe = rbio->real_stripes - 1;
2371 } else if (rbio->real_stripes - rbio->nr_data == 2) {
2372 p_stripe = rbio->real_stripes - 2;
2373 q_stripe = rbio->real_stripes - 1;
2374 } else {
2375 BUG();
2376 }
2377
2378 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2379 is_replace = 1;
2380 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2381 }
2382
2383
2384
2385
2386
2387
2388 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2389
2390 if (!need_check)
2391 goto writeback;
2392
2393 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2394 if (!p_page)
2395 goto cleanup;
2396 SetPageUptodate(p_page);
2397
2398 if (q_stripe != -1) {
2399 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2400 if (!q_page) {
2401 __free_page(p_page);
2402 goto cleanup;
2403 }
2404 SetPageUptodate(q_page);
2405 }
2406
2407 atomic_set(&rbio->error, 0);
2408
2409 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2410 struct page *p;
2411 void *parity;
2412
2413 for (stripe = 0; stripe < nr_data; stripe++) {
2414 p = page_in_rbio(rbio, stripe, pagenr, 0);
2415 pointers[stripe] = kmap(p);
2416 }
2417
2418
2419 pointers[stripe++] = kmap(p_page);
2420
2421 if (q_stripe != -1) {
2422
2423
2424
2425
2426
2427 pointers[stripe++] = kmap(q_page);
2428
2429 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2430 pointers);
2431 } else {
2432
2433 copy_page(pointers[nr_data], pointers[0]);
2434 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2435 }
2436
2437
2438 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2439 parity = kmap(p);
2440 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2441 copy_page(parity, pointers[rbio->scrubp]);
2442 else
2443
2444 bitmap_clear(rbio->dbitmap, pagenr, 1);
2445 kunmap(p);
2446
2447 for (stripe = 0; stripe < nr_data; stripe++)
2448 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2449 kunmap(p_page);
2450 }
2451
2452 __free_page(p_page);
2453 if (q_page)
2454 __free_page(q_page);
2455
2456 writeback:
2457
2458
2459
2460
2461
2462 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2463 struct page *page;
2464
2465 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2466 ret = rbio_add_io_page(rbio, &bio_list,
2467 page, rbio->scrubp, pagenr, rbio->stripe_len);
2468 if (ret)
2469 goto cleanup;
2470 }
2471
2472 if (!is_replace)
2473 goto submit_write;
2474
2475 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2476 struct page *page;
2477
2478 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2479 ret = rbio_add_io_page(rbio, &bio_list, page,
2480 bbio->tgtdev_map[rbio->scrubp],
2481 pagenr, rbio->stripe_len);
2482 if (ret)
2483 goto cleanup;
2484 }
2485
2486 submit_write:
2487 nr_data = bio_list_size(&bio_list);
2488 if (!nr_data) {
2489
2490 rbio_orig_end_io(rbio, BLK_STS_OK);
2491 return;
2492 }
2493
2494 atomic_set(&rbio->stripes_pending, nr_data);
2495
2496 while (1) {
2497 bio = bio_list_pop(&bio_list);
2498 if (!bio)
2499 break;
2500
2501 bio->bi_private = rbio;
2502 bio->bi_end_io = raid_write_end_io;
2503 bio->bi_opf = REQ_OP_WRITE;
2504
2505 submit_bio(bio);
2506 }
2507 return;
2508
2509 cleanup:
2510 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2511
2512 while ((bio = bio_list_pop(&bio_list)))
2513 bio_put(bio);
2514 }
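/*
 * Note the bitmap_clear() in the check loop above: pages whose
 * recomputed parity already matches what is on disk are dropped from
 * dbitmap, so the writeback loops only touch pages that actually
 * changed.
 */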
2515
2516 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2517 {
2518 if (stripe >= 0 && stripe < rbio->nr_data)
2519 return 1;
2520 return 0;
2521 }
2522
2523
2524 /*
2525  * While we're doing the parity check and repair, we could have
2526  * errors in reading pages off the disk.  This checks for errors and
2527  * if we're not able to read a page it triggers parity
2528  * reconstruction before the parity scrub is finished
2529  */
2530 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2531 {
2532 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2533 goto cleanup;
2534
2535 if (rbio->faila >= 0 || rbio->failb >= 0) {
2536 int dfail = 0, failp = -1;
2537
2538 if (is_data_stripe(rbio, rbio->faila))
2539 dfail++;
2540 else if (is_parity_stripe(rbio->faila))
2541 failp = rbio->faila;
2542
2543 if (is_data_stripe(rbio, rbio->failb))
2544 dfail++;
2545 else if (is_parity_stripe(rbio->failb))
2546 failp = rbio->failb;
2547
2548
2549
2550
2551
2552
2553 if (dfail > rbio->bbio->max_errors - 1)
2554 goto cleanup;
2555
2556
2557
2558
2559
2560 if (dfail == 0) {
2561 finish_parity_scrub(rbio, 0);
2562 return;
2563 }
2564
2565 /*
2566  * Here means we got one corrupted data stripe and one corrupted
2567  * parity on RAID6.  If the corrupted parity is the one being
2568  * scrubbed, luckily we can use the other one to repair the data,
2569  * otherwise we can not repair the data stripe.
2570  */
2571 if (failp != rbio->scrubp)
2572 goto cleanup;
2573
2574 __raid_recover_end_io(rbio);
2575 } else {
2576 finish_parity_scrub(rbio, 1);
2577 }
2578 return;
2579
2580 cleanup:
2581 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2582 }
2583
2584
2585 /*
2586  * end io for the read phase of the scrub.  All the bios here are
2587  * physical stripe bios we've read from the disk so we can
2588  * recalculate the parity of the stripe.  This kicks off
2589  * finish_parity_scrub once all the bios are read in, or parity
2590  * reconstruction if we had any errors along the way
2591  */
2592 static void raid56_parity_scrub_end_io(struct bio *bio)
2593 {
2594 struct btrfs_raid_bio *rbio = bio->bi_private;
2595
2596 if (bio->bi_status)
2597 fail_bio_stripe(rbio, bio);
2598 else
2599 set_bio_pages_uptodate(bio);
2600
2601 bio_put(bio);
2602
2603 if (!atomic_dec_and_test(&rbio->stripes_pending))
2604 return;
2605
2606
2607
2608
2609
2610
2611 validate_rbio_for_parity_scrub(rbio);
2612 }
2613
2614 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2615 {
2616 int bios_to_read = 0;
2617 struct bio_list bio_list;
2618 int ret;
2619 int pagenr;
2620 int stripe;
2621 struct bio *bio;
2622
2623 bio_list_init(&bio_list);
2624
2625 ret = alloc_rbio_essential_pages(rbio);
2626 if (ret)
2627 goto cleanup;
2628
2629 atomic_set(&rbio->error, 0);
2630
2631
2632
2633
2634 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2635 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2636 struct page *page;
2637
2638
2639
2640
2641
2642
2643 page = page_in_rbio(rbio, stripe, pagenr, 1);
2644 if (page)
2645 continue;
2646
2647 page = rbio_stripe_page(rbio, stripe, pagenr);
2648
2649
2650
2651
2652 if (PageUptodate(page))
2653 continue;
2654
2655 ret = rbio_add_io_page(rbio, &bio_list, page,
2656 stripe, pagenr, rbio->stripe_len);
2657 if (ret)
2658 goto cleanup;
2659 }
2660 }
2661
2662 bios_to_read = bio_list_size(&bio_list);
2663 if (!bios_to_read) {
2664
2665
2666
2667
2668
2669
2670 goto finish;
2671 }
2672
2673
2674
2675
2676
2677 atomic_set(&rbio->stripes_pending, bios_to_read);
2678 while (1) {
2679 bio = bio_list_pop(&bio_list);
2680 if (!bio)
2681 break;
2682
2683 bio->bi_private = rbio;
2684 bio->bi_end_io = raid56_parity_scrub_end_io;
2685 bio->bi_opf = REQ_OP_READ;
2686
2687 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2688
2689 submit_bio(bio);
2690 }
2691
2692 return;
2693
2694 cleanup:
2695 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2696
2697 while ((bio = bio_list_pop(&bio_list)))
2698 bio_put(bio);
2699
2700 return;
2701
2702 finish:
2703 validate_rbio_for_parity_scrub(rbio);
2704 }
2705
2706 static void scrub_parity_work(struct btrfs_work *work)
2707 {
2708 struct btrfs_raid_bio *rbio;
2709
2710 rbio = container_of(work, struct btrfs_raid_bio, work);
2711 raid56_parity_scrub_stripe(rbio);
2712 }
2713
2714 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2715 {
2716 if (!lock_stripe_add(rbio))
2717 start_async_work(rbio, scrub_parity_work);
2718 }
2719
2720
2721 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2722 struct btrfs_raid_bio *
2723 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2724 struct btrfs_bio *bbio, u64 length)
2725 {
2726 struct btrfs_raid_bio *rbio;
2727
2728 rbio = alloc_rbio(fs_info, bbio, length);
2729 if (IS_ERR(rbio))
2730 return NULL;
2731
2732 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2733 bio_list_add(&rbio->bio_list, bio);
2734
2735
2736
2737
2738 ASSERT(!bio->bi_iter.bi_size);
2739
2740 rbio->faila = find_logical_bio_stripe(rbio, bio);
2741 if (rbio->faila == -1) {
2742 BUG();
2743 kfree(rbio);
2744 return NULL;
2745 }
2746
2747
2748
2749
2750
2751 rbio->generic_bio_cnt = 1;
2752
2753 return rbio;
2754 }
2755
2756 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2757 {
2758 if (!lock_stripe_add(rbio))
2759 start_async_work(rbio, read_rebuild_work);
2760 }