Lines matching refs:sh — cross-reference hits for the identifier `sh` (a `struct stripe_head *`) in drivers/md/raid5.c, the Linux md RAID4/5/6 driver. Each entry shows the source line number, the excerpted line, and the enclosing function.
188 static inline int raid6_d0(struct stripe_head *sh) in raid6_d0() argument
190 if (sh->ddf_layout) in raid6_d0()
194 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
197 return sh->qd_idx + 1; in raid6_d0()
210 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
215 if (sh->ddf_layout) in raid6_idx_to_slot()
217 if (idx == sh->pd_idx) in raid6_idx_to_slot()
219 if (idx == sh->qd_idx) in raid6_idx_to_slot()
221 if (!sh->ddf_layout) in raid6_idx_to_slot()
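
The two helpers above translate a stripe's physical device index into its slot in the RAID6 syndrome-source array: DDF layouts count data from device 0 with P and Q pinned to the last two slots, while native md layouts start data just after the Q block. A standalone sketch of that mapping, using a simplified stand-in struct rather than the kernel's struct stripe_head:

#include <stdio.h>

struct stripe {
	int disks;      /* total devices, including P and Q */
	int pd_idx;     /* device index holding P parity    */
	int qd_idx;     /* device index holding Q syndrome  */
	int ddf_layout; /* nonzero for SNIA DDF layouts     */
};

/* First data device: DDF counts from device 0; native md layouts
 * start just after the Q block, wrapping at the end of the array. */
static int d0(const struct stripe *sh)
{
	if (sh->ddf_layout)
		return 0;
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	return sh->qd_idx + 1;
}

/* Map device index idx to its syndrome-source slot: data blocks fill
 * slots 0..syndrome_disks-1 in d0 order, P and Q take the last two. */
static int idx_to_slot(int idx, const struct stripe *sh,
		       int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	if (!sh->ddf_layout)
		(*count)++;
	return slot;
}

int main(void)
{
	struct stripe sh = { .disks = 6, .pd_idx = 4, .qd_idx = 5 };
	int syndrome_disks = sh.ddf_layout ? sh.disks : sh.disks - 2;
	int count = 0, i = d0(&sh), j;

	for (j = 0; j < sh.disks; j++) {
		printf("dev %d -> slot %d\n", i,
		       idx_to_slot(i, &sh, &count, syndrome_disks));
		i = (i + 1) % sh.disks; /* same walk as set_syndrome_sources() */
	}
	return 0;
}

For this example geometry, data devices 0-3 land in slots 0-3, P (device 4) in slot 4, and Q (device 5) in slot 5.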
243 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
245 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
246 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
247 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
250 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) in raid5_wakeup_stripe_thread() argument
252 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread()
255 int i, cpu = sh->cpu; in raid5_wakeup_stripe_thread()
259 sh->cpu = cpu; in raid5_wakeup_stripe_thread()
262 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
265 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
267 sh->group = group; in raid5_wakeup_stripe_thread()
275 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
279 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
286 queue_work_on(sh->cpu, raid5_wq, in raid5_wakeup_stripe_thread()
293 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
296 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
298 if (test_bit(STRIPE_HANDLE, &sh->state)) { in do_release_stripe()
299 if (test_bit(STRIPE_DELAYED, &sh->state) && in do_release_stripe()
300 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
301 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
302 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in do_release_stripe()
303 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
304 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
306 clear_bit(STRIPE_DELAYED, &sh->state); in do_release_stripe()
307 clear_bit(STRIPE_BIT_DELAY, &sh->state); in do_release_stripe()
309 list_add_tail(&sh->lru, &conf->handle_list); in do_release_stripe()
311 raid5_wakeup_stripe_thread(sh); in do_release_stripe()
317 BUG_ON(stripe_operations_active(sh)); in do_release_stripe()
318 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
323 if (!test_bit(STRIPE_EXPANDING, &sh->state)) in do_release_stripe()
324 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
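
do_release_stripe() is the routing decision for a stripe whose last reference is being dropped: stripes still flagged STRIPE_HANDLE go to the delayed, bitmap, or handle list, and everything else returns to the inactive pool (the kernel additionally keeps STRIPE_EXPANDING stripes off the inactive list, omitted here). A standalone model of just that decision, with plain booleans standing in for the STRIPE_* state bits:

#include <stdio.h>

enum dest { DELAYED, BITMAP, HANDLE, INACTIVE };

struct stripe_state {
	int handle;         /* STRIPE_HANDLE                  */
	int delayed;        /* STRIPE_DELAYED                 */
	int preread_active; /* STRIPE_PREREAD_ACTIVE          */
	int bit_delay;      /* STRIPE_BIT_DELAY               */
	long bm_seq;        /* bitmap sequence of this stripe */
};

static enum dest route(const struct stripe_state *s, long seq_write)
{
	if (s->handle) {
		if (s->delayed && !s->preread_active)
			return DELAYED;
		if (s->bit_delay && s->bm_seq - seq_write > 0)
			return BITMAP; /* wait for the bitmap flush */
		return HANDLE;
	}
	return INACTIVE;
}

int main(void)
{
	struct stripe_state s = { .handle = 1, .bit_delay = 1, .bm_seq = 5 };
	const char *name[] = { "delayed", "bitmap", "handle", "inactive" };

	printf("seq_write=3 -> %s\n", name[route(&s, 3)]); /* bitmap */
	printf("seq_write=5 -> %s\n", name[route(&s, 5)]); /* handle */
	return 0;
}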
328 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
331 if (atomic_dec_and_test(&sh->count)) in __release_stripe()
332 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
386 struct stripe_head *sh; in release_stripe_list() local
395 sh = llist_entry(head, struct stripe_head, release_list); in release_stripe_list()
399 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); in release_stripe_list()
405 hash = sh->hash_lock_index; in release_stripe_list()
406 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
413 static void release_stripe(struct stripe_head *sh) in release_stripe() argument
415 struct r5conf *conf = sh->raid_conf; in release_stripe()
423 if (atomic_add_unless(&sh->count, -1, 1)) in release_stripe()
427 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) in release_stripe()
429 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in release_stripe()
436 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { in release_stripe()
438 hash = sh->hash_lock_index; in release_stripe()
439 do_release_stripe(conf, sh, &list); in release_stripe()
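
release_stripe() avoids conf->device_lock on most puts: atomic_add_unless(&sh->count, -1, 1) drops the reference lock-free unless it is the last one; the final put either punts the stripe onto the lock-free released_stripes llist or takes the lock via atomic_dec_and_lock(). A sketch of the fast/slow split using C11 atomics and a pthread mutex (the llist punt and the kernel's re-check of state under the lock are omitted):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int count = 2;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Like the kernel's atomic_add_unless(v, -1, 1): decrement v unless
 * it is currently 1; return true if the decrement happened. */
static bool dec_unless_last(atomic_int *v)
{
	int c = atomic_load(v);

	while (c != 1)
		if (atomic_compare_exchange_weak(v, &c, c - 1))
			return true;
	return false;
}

static void put_ref(void)
{
	if (dec_unless_last(&count))
		return; /* fast path: not the last reference, no lock */

	/* slow path: likely the last put; do the release work under
	 * the lock (list manipulation, in the kernel's case) */
	pthread_mutex_lock(&lock);
	if (atomic_fetch_sub(&count, 1) == 1)
		printf("last put: stripe goes back on a list\n");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	put_ref(); /* 2 -> 1, lock never touched */
	put_ref(); /* 1 -> 0, lock taken once    */
	return 0;
}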
446 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
449 (unsigned long long)sh->sector); in remove_hash()
451 hlist_del_init(&sh->hash); in remove_hash()
454 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
456 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
459 (unsigned long long)sh->sector); in insert_hash()
461 hlist_add_head(&sh->hash, hp); in insert_hash()
467 struct stripe_head *sh = NULL; in get_free_stripe() local
473 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
475 remove_hash(sh); in get_free_stripe()
477 BUG_ON(hash != sh->hash_lock_index); in get_free_stripe()
481 return sh; in get_free_stripe()
484 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
488 int num = sh->raid_conf->pool_size; in shrink_buffers()
491 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); in shrink_buffers()
492 p = sh->dev[i].page; in shrink_buffers()
495 sh->dev[i].page = NULL; in shrink_buffers()
500 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) in grow_buffers() argument
503 int num = sh->raid_conf->pool_size; in grow_buffers()
511 sh->dev[i].page = page; in grow_buffers()
512 sh->dev[i].orig_page = page; in grow_buffers()
517 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
519 struct stripe_head *sh);
521 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
523 struct r5conf *conf = sh->raid_conf; in init_stripe()
526 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
527 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
528 BUG_ON(stripe_operations_active(sh)); in init_stripe()
529 BUG_ON(sh->batch_head); in init_stripe()
535 sh->generation = conf->generation - previous; in init_stripe()
536 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
537 sh->sector = sector; in init_stripe()
538 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
539 sh->state = 0; in init_stripe()
541 for (i = sh->disks; i--; ) { in init_stripe()
542 struct r5dev *dev = &sh->dev[i]; in init_stripe()
547 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
553 raid5_build_block(sh, i, previous); in init_stripe()
557 sh->overwrite_disks = 0; in init_stripe()
558 insert_hash(conf, sh); in init_stripe()
559 sh->cpu = smp_processor_id(); in init_stripe()
560 set_bit(STRIPE_BATCH_READY, &sh->state); in init_stripe()
566 struct stripe_head *sh; in __find_stripe() local
569 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
570 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
571 return sh; in __find_stripe()
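
insert_hash(), remove_hash(), and __find_stripe() implement the stripe cache's lookup structure: a hash over the stripe's starting sector, with sh->generation distinguishing old and new geometry across a reshape. A standalone model of the insert/find half (bucket count and sector shift are invented for the example; the kernel uses hlists with per-bucket hash locks):

#include <stdio.h>

#define NR_BUCKETS 256  /* invented for the example       */
#define STRIPE_SHIFT 3  /* invented: 8 sectors per stripe */

struct stripe {
	unsigned long long sector; /* stripe's starting sector */
	int generation;            /* geometry generation      */
	struct stripe *next;       /* hash chain               */
};

static struct stripe *tbl[NR_BUCKETS];

static struct stripe **bucket(unsigned long long sector)
{
	return &tbl[(sector >> STRIPE_SHIFT) % NR_BUCKETS];
}

static void insert_hash(struct stripe *sh)
{
	struct stripe **hp = bucket(sh->sector);

	sh->next = *hp;
	*hp = sh;
}

static struct stripe *find_stripe(unsigned long long sector, int gen)
{
	struct stripe *sh;

	for (sh = *bucket(sector); sh; sh = sh->next)
		if (sh->sector == sector && sh->generation == gen)
			return sh;
	return NULL;
}

int main(void)
{
	struct stripe a = { .sector = 128, .generation = 1 };

	insert_hash(&a);
	printf("hit: %p\n", (void *)find_stripe(128, 1));
	printf("miss (stale generation): %p\n", (void *)find_stripe(128, 0));
	return 0;
}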
662 struct stripe_head *sh; in get_active_stripe() local
673 sh = __find_stripe(conf, sector, conf->generation - previous); in get_active_stripe()
674 if (!sh) { in get_active_stripe()
676 sh = get_free_stripe(conf, hash); in get_active_stripe()
677 if (!sh && llist_empty(&conf->released_stripes) && in get_active_stripe()
682 if (noblock && sh == NULL) in get_active_stripe()
684 if (!sh) { in get_active_stripe()
698 init_stripe(sh, sector, previous); in get_active_stripe()
699 atomic_inc(&sh->count); in get_active_stripe()
701 } else if (!atomic_inc_not_zero(&sh->count)) { in get_active_stripe()
703 if (!atomic_read(&sh->count)) { in get_active_stripe()
704 if (!test_bit(STRIPE_HANDLE, &sh->state)) in get_active_stripe()
706 BUG_ON(list_empty(&sh->lru) && in get_active_stripe()
707 !test_bit(STRIPE_EXPANDING, &sh->state)); in get_active_stripe()
708 list_del_init(&sh->lru); in get_active_stripe()
709 if (sh->group) { in get_active_stripe()
710 sh->group->stripes_cnt--; in get_active_stripe()
711 sh->group = NULL; in get_active_stripe()
714 atomic_inc(&sh->count); in get_active_stripe()
717 } while (sh == NULL); in get_active_stripe()
720 return sh; in get_active_stripe()
723 static bool is_full_stripe_write(struct stripe_head *sh) in is_full_stripe_write() argument
725 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); in is_full_stripe_write()
726 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); in is_full_stripe_write()
749 static bool stripe_can_batch(struct stripe_head *sh) in stripe_can_batch() argument
751 return test_bit(STRIPE_BATCH_READY, &sh->state) && in stripe_can_batch()
752 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && in stripe_can_batch()
753 is_full_stripe_write(sh); in stripe_can_batch()
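
The two predicates above gate stripe batching: a stripe may join a batch only when it is a full-stripe write (every data block fully overwritten, i.e. overwrite_disks equals disks minus max_degraded) and is not waiting on bitmap or batch bookkeeping. A standalone model with booleans in place of the STRIPE_* bits:

#include <stdbool.h>
#include <stdio.h>

struct stripe {
	int disks;           /* total devices            */
	int max_degraded;    /* parity devices (1 or 2)  */
	int overwrite_disks; /* fully-overwritten blocks */
	bool batch_ready;    /* STRIPE_BATCH_READY       */
	bool bitmap_pending; /* STRIPE_BITMAP_PENDING    */
};

static bool is_full_stripe_write(const struct stripe *sh)
{
	return sh->overwrite_disks == sh->disks - sh->max_degraded;
}

static bool stripe_can_batch(const struct stripe *sh)
{
	return sh->batch_ready && !sh->bitmap_pending &&
	       is_full_stripe_write(sh);
}

int main(void)
{
	struct stripe sh = { .disks = 6, .max_degraded = 2,
			     .overwrite_disks = 4, .batch_ready = true };

	printf("can batch: %d\n", stripe_can_batch(&sh)); /* 1 */
	sh.overwrite_disks = 3; /* one data block not fully written */
	printf("can batch: %d\n", stripe_can_batch(&sh)); /* 0 */
	return 0;
}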
757 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
764 if (!stripe_can_batch(sh)) in stripe_add_to_batch_list()
767 tmp_sec = sh->sector; in stripe_add_to_batch_list()
770 head_sector = sh->sector - STRIPE_SECTORS; in stripe_add_to_batch_list()
798 lock_two_stripes(head, sh); in stripe_add_to_batch_list()
800 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) in stripe_add_to_batch_list()
803 if (sh->batch_head) in stripe_add_to_batch_list()
807 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
809 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) in stripe_add_to_batch_list()
824 list_add(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
827 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
830 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
832 list_add_tail(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
836 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in stripe_add_to_batch_list()
841 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { in stripe_add_to_batch_list()
842 int seq = sh->bm_seq; in stripe_add_to_batch_list()
843 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && in stripe_add_to_batch_list()
844 sh->batch_head->bm_seq > seq) in stripe_add_to_batch_list()
845 seq = sh->batch_head->bm_seq; in stripe_add_to_batch_list()
846 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); in stripe_add_to_batch_list()
847 sh->batch_head->bm_seq = seq; in stripe_add_to_batch_list()
850 atomic_inc(&sh->count); in stripe_add_to_batch_list()
852 unlock_two_stripes(head, sh); in stripe_add_to_batch_list()
860 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
870 if (sh->generation == conf->generation - 1) in use_new_offset()
883 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
885 struct r5conf *conf = sh->raid_conf; in ops_run_io()
886 int i, disks = sh->disks; in ops_run_io()
887 struct stripe_head *head_sh = sh; in ops_run_io()
897 sh = head_sh; in ops_run_io()
898 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
899 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
903 if (test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_io()
905 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
908 &sh->dev[i].flags)) { in ops_run_io()
913 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) in ops_run_io()
917 bi = &sh->dev[i].req; in ops_run_io()
918 rbi = &sh->dev[i].rreq; /* For writing to replacement */ in ops_run_io()
958 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in ops_run_io()
992 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1000 bi->bi_private = sh; in ops_run_io()
1003 __func__, (unsigned long long)sh->sector, in ops_run_io()
1005 atomic_inc(&sh->count); in ops_run_io()
1006 if (sh != head_sh) in ops_run_io()
1008 if (use_new_offset(conf, sh)) in ops_run_io()
1009 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1012 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1017 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1018 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1019 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1031 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
1036 sh->dev[i].sector); in ops_run_io()
1044 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1051 rbi->bi_private = sh; in ops_run_io()
1055 __func__, (unsigned long long)sh->sector, in ops_run_io()
1057 atomic_inc(&sh->count); in ops_run_io()
1058 if (sh != head_sh) in ops_run_io()
1060 if (use_new_offset(conf, sh)) in ops_run_io()
1061 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1064 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1066 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1067 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1068 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1082 sh->dev[i].sector); in ops_run_io()
1087 set_bit(STRIPE_DEGRADED, &sh->state); in ops_run_io()
1089 bi->bi_rw, i, (unsigned long long)sh->sector); in ops_run_io()
1090 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
1091 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
1096 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_io()
1098 if (sh != head_sh) in ops_run_io()
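
Within ops_run_io(), the bio's starting sector is the stripe's device-relative sector plus either rdev->data_offset or rdev->new_data_offset, chosen by use_new_offset() so that I/O lands in the old or new on-disk layout while a reshape is in flight. A minimal arithmetic sketch (the offset values are invented):

#include <stdio.h>

struct rdev {
	unsigned long long data_offset;     /* pre-reshape data start  */
	unsigned long long new_data_offset; /* post-reshape data start */
};

static unsigned long long io_sector(const struct rdev *rdev,
				    unsigned long long stripe_sector,
				    int use_new)
{
	return stripe_sector +
	       (use_new ? rdev->new_data_offset : rdev->data_offset);
}

int main(void)
{
	struct rdev rdev = { .data_offset = 2048, .new_data_offset = 4096 };

	printf("old layout: %llu\n", io_sector(&rdev, 1000, 0)); /* 3048 */
	printf("new layout: %llu\n", io_sector(&rdev, 1000, 1)); /* 5048 */
	return 0;
}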
1106 struct stripe_head *sh) in async_copy_data() argument
1144 if (sh->raid_conf->skip_copy && in async_copy_data()
1168 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
1173 (unsigned long long)sh->sector); in ops_complete_biofill()
1176 for (i = sh->disks; i--; ) { in ops_complete_biofill()
1177 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
1201 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
1205 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
1206 release_stripe(sh); in ops_complete_biofill()
1209 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
1215 BUG_ON(sh->batch_head); in ops_run_biofill()
1217 (unsigned long long)sh->sector); in ops_run_biofill()
1219 for (i = sh->disks; i--; ) { in ops_run_biofill()
1220 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
1223 spin_lock_irq(&sh->stripe_lock); in ops_run_biofill()
1226 spin_unlock_irq(&sh->stripe_lock); in ops_run_biofill()
1230 dev->sector, tx, sh); in ops_run_biofill()
1236 atomic_inc(&sh->count); in ops_run_biofill()
1237 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
1241 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
1248 tgt = &sh->dev[target]; in mark_target_uptodate()
1256 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
1259 (unsigned long long)sh->sector); in ops_complete_compute()
1262 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
1263 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
1265 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
1266 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
1267 sh->check_state = check_state_compute_result; in ops_complete_compute()
1268 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
1269 release_stripe(sh); in ops_complete_compute()
1273 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
1279 return addr + sizeof(struct page *) * (sh->disks + 2); in to_addr_conv()
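
to_addr_conv() relies on the layout of the per-CPU scribble buffer: an array of disks+2 page pointers (enough sources plus P and Q in the worst-case layout) sits at the front, and the addr_conv_t scratch that the async-tx API needs follows it, exactly as the return statement above computes. A standalone sketch of that pointer arithmetic (addr_conv_t is a stand-in typedef here):

#include <stdio.h>
#include <stdlib.h>

struct page;               /* opaque, as in the kernel      */
typedef void *addr_conv_t; /* stand-in for the real typedef */

/* scribble layout: [ struct page *srcs[disks + 2] ][ addr_conv_t ... ] */
static addr_conv_t *to_addr_conv(void *scribble, int disks)
{
	return (addr_conv_t *)((struct page **)scribble + (disks + 2));
}

int main(void)
{
	int disks = 6;
	size_t sz = (size_t)(disks + 2) *
		    (sizeof(struct page *) + sizeof(addr_conv_t));
	void *scribble = malloc(sz);

	printf("srcs at %p, addr_conv at %p (offset %zu bytes)\n",
	       scribble, (void *)to_addr_conv(scribble, disks),
	       (size_t)((char *)to_addr_conv(scribble, disks) -
			(char *)scribble));
	free(scribble);
	return 0;
}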
1292 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1294 int disks = sh->disks; in ops_run_compute5()
1296 int target = sh->ops.target; in ops_run_compute5()
1297 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
1304 BUG_ON(sh->batch_head); in ops_run_compute5()
1307 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1312 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
1314 atomic_inc(&sh->count); in ops_run_compute5()
1317 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1336 struct stripe_head *sh, in set_syndrome_sources() argument
1339 int disks = sh->disks; in set_syndrome_sources()
1340 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
1341 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
1351 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
1352 struct r5dev *dev = &sh->dev[i]; in set_syndrome_sources()
1354 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1360 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
1368 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1370 int disks = sh->disks; in ops_run_compute6_1()
1373 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
1381 BUG_ON(sh->batch_head); in ops_run_compute6_1()
1382 if (sh->ops.target < 0) in ops_run_compute6_1()
1383 target = sh->ops.target2; in ops_run_compute6_1()
1384 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
1385 target = sh->ops.target; in ops_run_compute6_1()
1391 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1393 tgt = &sh->dev[target]; in ops_run_compute6_1()
1397 atomic_inc(&sh->count); in ops_run_compute6_1()
1400 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); in ops_run_compute6_1()
1404 ops_complete_compute, sh, in ops_run_compute6_1()
1405 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1413 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
1417 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
1418 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1426 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1428 int i, count, disks = sh->disks; in ops_run_compute6_2()
1429 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
1430 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
1432 int target = sh->ops.target; in ops_run_compute6_2()
1433 int target2 = sh->ops.target2; in ops_run_compute6_2()
1434 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
1435 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
1440 BUG_ON(sh->batch_head); in ops_run_compute6_2()
1442 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1455 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
1457 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1470 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1472 atomic_inc(&sh->count); in ops_run_compute6_2()
1479 ops_complete_compute, sh, in ops_run_compute6_2()
1480 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1486 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1498 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1500 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1504 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1508 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); in ops_run_compute6_2()
1510 ops_complete_compute, sh, in ops_run_compute6_2()
1511 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1517 ops_complete_compute, sh, in ops_run_compute6_2()
1518 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1535 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1538 (unsigned long long)sh->sector); in ops_complete_prexor()
1542 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1545 int disks = sh->disks; in ops_run_prexor5()
1547 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5()
1551 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1553 BUG_ON(sh->batch_head); in ops_run_prexor5()
1555 (unsigned long long)sh->sector); in ops_run_prexor5()
1558 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor5()
1565 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1572 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1580 (unsigned long long)sh->sector); in ops_run_prexor6()
1582 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); in ops_run_prexor6()
1585 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
1592 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1594 int disks = sh->disks; in ops_run_biodrain()
1596 struct stripe_head *head_sh = sh; in ops_run_biodrain()
1599 (unsigned long long)sh->sector); in ops_run_biodrain()
1605 sh = head_sh; in ops_run_biodrain()
1610 dev = &sh->dev[i]; in ops_run_biodrain()
1611 spin_lock_irq(&sh->stripe_lock); in ops_run_biodrain()
1614 sh->overwrite_disks = 0; in ops_run_biodrain()
1617 spin_unlock_irq(&sh->stripe_lock); in ops_run_biodrain()
1630 dev->sector, tx, sh); in ops_run_biodrain()
1641 sh = list_first_entry(&sh->batch_list, in ops_run_biodrain()
1644 if (sh == head_sh) in ops_run_biodrain()
1656 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
1657 int disks = sh->disks; in ops_complete_reconstruct()
1658 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
1659 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
1664 (unsigned long long)sh->sector); in ops_complete_reconstruct()
1667 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
1668 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); in ops_complete_reconstruct()
1669 discard |= test_bit(R5_Discard, &sh->dev[i].flags); in ops_complete_reconstruct()
1673 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
1685 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
1686 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
1687 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
1688 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
1690 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
1691 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
1694 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
1695 release_stripe(sh); in ops_complete_reconstruct()
1699 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
1702 int disks = sh->disks; in ops_run_reconstruct5()
1705 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
1710 struct stripe_head *head_sh = sh; in ops_run_reconstruct5()
1714 (unsigned long long)sh->sector); in ops_run_reconstruct5()
1716 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct5()
1719 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct5()
1722 if (i >= sh->disks) { in ops_run_reconstruct5()
1723 atomic_inc(&sh->count); in ops_run_reconstruct5()
1724 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
1725 ops_complete_reconstruct(sh); in ops_run_reconstruct5()
1736 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1738 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1743 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1745 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1757 list_first_entry(&sh->batch_list, in ops_run_reconstruct5()
1765 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1769 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1778 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct5()
1785 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
1791 struct stripe_head *head_sh = sh; in ops_run_reconstruct6()
1796 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
1798 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct6()
1799 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
1801 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct6()
1804 if (i >= sh->disks) { in ops_run_reconstruct6()
1805 atomic_inc(&sh->count); in ops_run_reconstruct6()
1806 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
1807 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in ops_run_reconstruct6()
1808 ops_complete_reconstruct(sh); in ops_run_reconstruct6()
1815 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct6()
1823 count = set_syndrome_sources(blocks, sh, synflags); in ops_run_reconstruct6()
1825 list_first_entry(&sh->batch_list, in ops_run_reconstruct6()
1831 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1834 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1838 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct6()
1846 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
1849 (unsigned long long)sh->sector); in ops_complete_check()
1851 sh->check_state = check_state_check_result; in ops_complete_check()
1852 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
1853 release_stripe(sh); in ops_complete_check()
1856 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
1858 int disks = sh->disks; in ops_run_check_p()
1859 int pd_idx = sh->pd_idx; in ops_run_check_p()
1860 int qd_idx = sh->qd_idx; in ops_run_check_p()
1869 (unsigned long long)sh->sector); in ops_run_check_p()
1871 BUG_ON(sh->batch_head); in ops_run_check_p()
1873 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
1878 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
1882 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
1884 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
1886 atomic_inc(&sh->count); in ops_run_check_p()
1887 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
1891 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
1898 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
1900 BUG_ON(sh->batch_head); in ops_run_check_pq()
1901 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); in ops_run_check_pq()
1905 atomic_inc(&sh->count); in ops_run_check_pq()
1907 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
1909 &sh->ops.zero_sum_result, percpu->spare_page, &submit); in ops_run_check_pq()
1912 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
1914 int overlap_clear = 0, i, disks = sh->disks; in raid_run_ops()
1916 struct r5conf *conf = sh->raid_conf; in raid_run_ops()
1924 ops_run_biofill(sh); in raid_run_ops()
1930 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
1932 if (sh->ops.target2 < 0 || sh->ops.target < 0) in raid_run_ops()
1933 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
1935 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
1944 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
1946 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
1950 tx = ops_run_biodrain(sh, tx); in raid_run_ops()
1956 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
1958 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
1962 if (sh->check_state == check_state_run) in raid_run_ops()
1963 ops_run_check_p(sh, percpu); in raid_run_ops()
1964 else if (sh->check_state == check_state_run_q) in raid_run_ops()
1965 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
1966 else if (sh->check_state == check_state_run_pq) in raid_run_ops()
1967 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
1972 if (overlap_clear && !sh->batch_head) in raid_run_ops()
1974 struct r5dev *dev = &sh->dev[i]; in raid_run_ops()
1976 wake_up(&sh->raid_conf->wait_for_overlap); in raid_run_ops()
1983 struct stripe_head *sh; in alloc_stripe() local
1985 sh = kmem_cache_zalloc(sc, gfp); in alloc_stripe()
1986 if (sh) { in alloc_stripe()
1987 spin_lock_init(&sh->stripe_lock); in alloc_stripe()
1988 spin_lock_init(&sh->batch_lock); in alloc_stripe()
1989 INIT_LIST_HEAD(&sh->batch_list); in alloc_stripe()
1990 INIT_LIST_HEAD(&sh->lru); in alloc_stripe()
1991 atomic_set(&sh->count, 1); in alloc_stripe()
1993 return sh; in alloc_stripe()
1997 struct stripe_head *sh; in grow_one_stripe() local
1999 sh = alloc_stripe(conf->slab_cache, gfp); in grow_one_stripe()
2000 if (!sh) in grow_one_stripe()
2003 sh->raid_conf = conf; in grow_one_stripe()
2005 if (grow_buffers(sh, gfp)) { in grow_one_stripe()
2006 shrink_buffers(sh); in grow_one_stripe()
2007 kmem_cache_free(conf->slab_cache, sh); in grow_one_stripe()
2010 sh->hash_lock_index = in grow_one_stripe()
2015 release_stripe(sh); in grow_one_stripe()
2259 struct stripe_head *sh; in drop_one_stripe() local
2263 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2265 if (!sh) in drop_one_stripe()
2267 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
2268 shrink_buffers(sh); in drop_one_stripe()
2269 kmem_cache_free(conf->slab_cache, sh); in drop_one_stripe()
2288 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
2289 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
2290 int disks = sh->disks, i; in raid5_end_read_request()
2297 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2301 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2307 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2317 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2318 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2320 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2322 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2323 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
2336 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2337 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2338 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2339 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2348 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2350 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2367 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { in raid5_end_read_request()
2385 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2388 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { in raid5_end_read_request()
2389 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2390 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2392 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2394 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2395 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2399 rdev, sh->sector, STRIPE_SECTORS, 0))) in raid5_end_read_request()
2404 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
2405 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
2406 release_stripe(sh); in raid5_end_read_request()
2411 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
2412 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
2413 int disks = sh->disks, i; in raid5_end_write_request()
2421 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2425 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2439 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2449 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2452 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
2455 set_bit(STRIPE_DEGRADED, &sh->state); in raid5_end_write_request()
2457 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
2461 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2464 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
2465 if (test_bit(R5_ReadError, &sh->dev[i].flags)) in raid5_end_write_request()
2470 set_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_write_request()
2475 if (sh->batch_head && !uptodate && !replacement) in raid5_end_write_request()
2476 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); in raid5_end_write_request()
2478 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
2479 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
2480 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
2481 release_stripe(sh); in raid5_end_write_request()
2483 if (sh->batch_head && sh != sh->batch_head) in raid5_end_write_request()
2484 release_stripe(sh->batch_head); in raid5_end_write_request()
2487 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
2489 static void raid5_build_block(struct stripe_head *sh, int i, int previous) in raid5_build_block() argument
2491 struct r5dev *dev = &sh->dev[i]; in raid5_build_block()
2496 dev->req.bi_private = sh; in raid5_build_block()
2501 dev->rreq.bi_private = sh; in raid5_build_block()
2504 dev->sector = compute_blocknr(sh, i, previous); in raid5_build_block()
2538 struct stripe_head *sh) in raid5_compute_sector() argument
2726 if (sh) { in raid5_compute_sector()
2727 sh->pd_idx = pd_idx; in raid5_compute_sector()
2728 sh->qd_idx = qd_idx; in raid5_compute_sector()
2729 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
2738 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) in compute_blocknr() argument
2740 struct r5conf *conf = sh->raid_conf; in compute_blocknr()
2741 int raid_disks = sh->disks; in compute_blocknr()
2743 sector_t new_sector = sh->sector, check; in compute_blocknr()
2758 if (i == sh->pd_idx) in compute_blocknr()
2766 if (i > sh->pd_idx) in compute_blocknr()
2771 if (i < sh->pd_idx) in compute_blocknr()
2773 i -= (sh->pd_idx + 1); in compute_blocknr()
2785 if (i == sh->qd_idx) in compute_blocknr()
2792 if (sh->pd_idx == raid_disks-1) in compute_blocknr()
2794 else if (i > sh->pd_idx) in compute_blocknr()
2799 if (sh->pd_idx == raid_disks-1) in compute_blocknr()
2803 if (i < sh->pd_idx) in compute_blocknr()
2805 i -= (sh->pd_idx + 2); in compute_blocknr()
2815 if (sh->pd_idx == 0) in compute_blocknr()
2819 if (i < sh->pd_idx) in compute_blocknr()
2821 i -= (sh->pd_idx + 1); in compute_blocknr()
2826 if (i > sh->pd_idx) in compute_blocknr()
2831 if (i < sh->pd_idx) in compute_blocknr()
2833 i -= (sh->pd_idx + 1); in compute_blocknr()
2849 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in compute_blocknr()
2850 || sh2.qd_idx != sh->qd_idx) { in compute_blocknr()
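
raid5_compute_sector() maps a logical array sector to (device stripe sector, data-disk index, parity-disk index), and compute_blocknr() is its inverse; the lines above show the function verifying itself by re-running the forward map. A standalone sketch for just the default left-symmetric RAID5 layout (disk count and chunk size are invented; the kernel handles many more layouts, RAID6, and reshapes in flight):

#include <assert.h>
#include <stdio.h>

#define DISKS 5 /* invented geometry               */
#define CHUNK 8 /* sectors per chunk; invented too */

/* logical array sector -> (device stripe sector, data disk, parity disk) */
static void compute_sector(unsigned long long r_sector,
			   unsigned long long *new_sector,
			   int *dd_idx, int *pd_idx)
{
	unsigned long long chunk = r_sector / CHUNK;
	int offset = (int)(r_sector % CHUNK);
	unsigned long long stripe = chunk / (DISKS - 1);
	int dd = (int)(chunk % (DISKS - 1));

	*pd_idx = (DISKS - 1) - (int)(stripe % DISKS);
	/* left-symmetric: data disks follow the parity disk, wrapping */
	*dd_idx = (*pd_idx + 1 + dd) % DISKS;
	*new_sector = stripe * CHUNK + offset;
}

/* inverse: (device stripe sector, data disk, parity disk) -> array sector */
static unsigned long long compute_blocknr(unsigned long long new_sector,
					  int i, int pd_idx)
{
	unsigned long long stripe = new_sector / CHUNK;
	int offset = (int)(new_sector % CHUNK);
	int dd = (i - pd_idx - 1 + DISKS) % DISKS;

	return (stripe * (DISKS - 1) + dd) * CHUNK + offset;
}

int main(void)
{
	unsigned long long s, ns;
	int dd, pd;

	for (s = 0; s < 1000; s++) {
		compute_sector(s, &ns, &dd, &pd);
		assert(compute_blocknr(ns, dd, pd) == s);
	}
	printf("round-trip verified for %llu sectors\n", s);
	return 0;
}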
2859 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
2862 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction()
2863 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
2869 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
2887 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
2890 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
2895 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
2898 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
2899 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
2901 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || in schedule_reconstruction()
2902 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); in schedule_reconstruction()
2905 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
2921 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
2930 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
2931 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
2935 int qd_idx = sh->qd_idx; in schedule_reconstruction()
2936 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
2944 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
2953 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in add_stripe_bio() argument
2957 struct r5conf *conf = sh->raid_conf; in add_stripe_bio()
2962 (unsigned long long)sh->sector); in add_stripe_bio()
2972 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
2974 if (sh->batch_head) in add_stripe_bio()
2977 bip = &sh->dev[dd_idx].towrite; in add_stripe_bio()
2981 bip = &sh->dev[dd_idx].toread; in add_stripe_bio()
2991 clear_bit(STRIPE_BATCH_READY, &sh->state); in add_stripe_bio()
3001 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio()
3002 for (bi=sh->dev[dd_idx].towrite; in add_stripe_bio()
3003 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && in add_stripe_bio()
3005 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3009 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) in add_stripe_bio()
3010 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in add_stripe_bio()
3011 sh->overwrite_disks++; in add_stripe_bio()
3016 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
3031 set_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3032 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3033 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3035 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3036 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3037 if (!sh->batch_head) { in add_stripe_bio()
3038 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3039 set_bit(STRIPE_BIT_DELAY, &sh->state); in add_stripe_bio()
3042 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3044 if (stripe_can_batch(sh)) in add_stripe_bio()
3045 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3049 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3050 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3057 struct stripe_head *sh) in stripe_set_idx() argument
3069 &dd_idx, sh); in stripe_set_idx()
3073 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3078 BUG_ON(sh->batch_head); in handle_failed_stripe()
3083 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
3095 sh->sector, in handle_failed_stripe()
3101 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3103 bi = sh->dev[i].towrite; in handle_failed_stripe()
3104 sh->dev[i].towrite = NULL; in handle_failed_stripe()
3105 sh->overwrite_disks = 0; in handle_failed_stripe()
3106 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3110 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3114 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3115 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3125 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3129 bi = sh->dev[i].written; in handle_failed_stripe()
3130 sh->dev[i].written = NULL; in handle_failed_stripe()
3131 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { in handle_failed_stripe()
3132 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_failed_stripe()
3133 sh->dev[i].page = sh->dev[i].orig_page; in handle_failed_stripe()
3138 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3139 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3152 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
3153 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
3154 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
3155 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3156 bi = sh->dev[i].toread; in handle_failed_stripe()
3157 sh->dev[i].toread = NULL; in handle_failed_stripe()
3158 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3159 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3162 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3164 r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3174 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3179 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
3182 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
3188 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3194 BUG_ON(sh->batch_head); in handle_failed_sync()
3195 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
3196 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3216 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3223 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3234 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
3239 rdev = sh->raid_conf->disks[disk_idx].replacement; in want_replace()
3243 && (rdev->recovery_offset <= sh->sector in want_replace()
3244 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3257 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, in need_this_block() argument
3260 struct r5dev *dev = &sh->dev[disk_idx]; in need_this_block()
3261 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in need_this_block()
3262 &sh->dev[s->failed_num[1]] }; in need_this_block()
3279 (s->replacing && want_replace(sh, disk_idx))) in need_this_block()
3304 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in need_this_block()
3332 if (sh->raid_conf->level != 6 && in need_this_block()
3333 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3337 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3338 s->failed_num[i] != sh->qd_idx && in need_this_block()
3347 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
3350 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
3353 if (need_this_block(sh, s, disk_idx, disks)) { in fetch_block()
3359 BUG_ON(sh->batch_head); in fetch_block()
3367 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3368 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3371 sh->ops.target = disk_idx; in fetch_block()
3372 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
3391 &sh->dev[other].flags)) in fetch_block()
3396 (unsigned long long)sh->sector, in fetch_block()
3398 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3400 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
3401 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
3402 sh->ops.target = disk_idx; in fetch_block()
3403 sh->ops.target2 = other; in fetch_block()
3422 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
3432 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
3433 !sh->reconstruct_state) in handle_stripe_fill()
3435 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
3437 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
3448 struct stripe_head *sh, int disks, struct bio **return_bi) in handle_stripe_clean_event() argument
3453 struct stripe_head *head_sh = sh; in handle_stripe_clean_event()
3457 if (sh->dev[i].written) { in handle_stripe_clean_event()
3458 dev = &sh->dev[i]; in handle_stripe_clean_event()
3487 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3489 !test_bit(STRIPE_DEGRADED, &sh->state), in handle_stripe_clean_event()
3492 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
3495 if (sh != head_sh) { in handle_stripe_clean_event()
3496 dev = &sh->dev[i]; in handle_stripe_clean_event()
3500 sh = head_sh; in handle_stripe_clean_event()
3501 dev = &sh->dev[i]; in handle_stripe_clean_event()
3508 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
3510 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
3511 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
3512 if (sh->qd_idx >= 0) { in handle_stripe_clean_event()
3513 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
3514 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
3517 clear_bit(STRIPE_DISCARD, &sh->state); in handle_stripe_clean_event()
3524 hash = sh->hash_lock_index; in handle_stripe_clean_event()
3526 remove_hash(sh); in handle_stripe_clean_event()
3529 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
3531 if (sh != head_sh) in handle_stripe_clean_event()
3534 sh = head_sh; in handle_stripe_clean_event()
3536 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) in handle_stripe_clean_event()
3537 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_clean_event()
3541 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
3550 struct stripe_head *sh, in handle_stripe_dirtying() argument
3565 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
3573 (unsigned long long)sh->sector); in handle_stripe_dirtying()
3576 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3577 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe_dirtying()
3588 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
3599 (unsigned long long)sh->sector, rmw, rcw); in handle_stripe_dirtying()
3600 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3606 (unsigned long long)sh->sector, rmw); in handle_stripe_dirtying()
3608 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3609 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe_dirtying()
3615 &sh->state)) { in handle_stripe_dirtying()
3622 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3623 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3633 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3635 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
3642 &sh->state)) { in handle_stripe_dirtying()
3650 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3651 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3657 (unsigned long long)sh->sector, in handle_stripe_dirtying()
3658 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); in handle_stripe_dirtying()
3662 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe_dirtying()
3663 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3675 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
3677 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
3678 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
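
handle_stripe_dirtying() chooses between read-modify-write (read the blocks being rewritten plus the old parity) and reconstruct-write (read every block not being fully overwritten, then recompute parity from scratch) by counting how many reads each would cost. A standalone sketch of that counting for RAID5 (booleans stand in for the R5_* flags; the kernel also weighs locked blocks, RAID6's Q, recovery position, and the rmw_level policy):

#include <stdio.h>

struct dev {
	int towrite;   /* new data pending for this block  */
	int uptodate;  /* block already valid in the cache */
	int overwrite; /* write covers the whole block     */
};

static void count_reads(const struct dev *d, int disks, int pd_idx,
			int *rmw, int *rcw)
{
	int i;

	*rmw = *rcw = 0;
	for (i = 0; i < disks; i++) {
		/* rmw: read old data for each rewritten block, plus
		 * the old parity block, unless already up to date */
		if ((d[i].towrite || i == pd_idx) && !d[i].uptodate)
			(*rmw)++;
		/* rcw: read every data block not fully overwritten,
		 * so parity can be recomputed from scratch */
		if (!d[i].overwrite && i != pd_idx && !d[i].uptodate)
			(*rcw)++;
	}
}

int main(void)
{
	/* 5-device RAID5, parity on device 4, small write to device 0 */
	struct dev d[5] = { { .towrite = 1 } };
	int rmw, rcw;

	count_reads(d, 5, 4, &rmw, &rcw);
	printf("rmw=%d rcw=%d -> %s wins\n", rmw, rcw,
	       rmw < rcw ? "read-modify-write" : "reconstruct-write");
	return 0;
}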
3681 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
3686 BUG_ON(sh->batch_head); in handle_parity_checks5()
3687 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
3689 switch (sh->check_state) { in handle_parity_checks5()
3694 sh->check_state = check_state_run; in handle_parity_checks5()
3696 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
3700 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
3703 sh->check_state = check_state_idle; in handle_parity_checks5()
3705 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
3708 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
3719 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks5()
3720 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
3725 sh->check_state = check_state_idle; in handle_parity_checks5()
3737 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
3741 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
3746 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
3748 sh->check_state = check_state_compute_run; in handle_parity_checks5()
3749 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
3752 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
3753 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
3754 sh->ops.target2 = -1; in handle_parity_checks5()
3763 __func__, sh->check_state, in handle_parity_checks5()
3764 (unsigned long long) sh->sector); in handle_parity_checks5()
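
handle_parity_checks5() drives the check state machine around an asynchronous xor-sum: if xoring all data blocks with P yields zero, the stripe is in sync; otherwise SUM_CHECK_P_RESULT is set and parity gets recomputed. A standalone byte-array model of the zero-sum idea:

#include <stdio.h>

#define BLK 8 /* bytes per block; tiny for the example */

/* xor all blocks (data plus P); any nonzero byte in the sum is a
 * parity mismatch, i.e. SUM_CHECK_P_RESULT in the kernel */
static int p_mismatch(unsigned char blocks[][BLK], int n)
{
	unsigned char sum[BLK] = { 0 };
	int i, j, bad = 0;

	for (i = 0; i < n; i++)
		for (j = 0; j < BLK; j++)
			sum[j] ^= blocks[i][j];
	for (j = 0; j < BLK; j++)
		bad |= sum[j];
	return bad != 0;
}

int main(void)
{
	unsigned char b[3][BLK] = {
		{ 1, 2, 3, 4, 5, 6, 7, 8 },
		{ 8, 7, 6, 5, 4, 3, 2, 1 },
	};
	int j;

	for (j = 0; j < BLK; j++) /* block 2 is P */
		b[2][j] = b[0][j] ^ b[1][j];
	printf("mismatch: %d\n", p_mismatch(b, 3)); /* 0: in sync  */
	b[1][0] ^= 0xff;                            /* corrupt one */
	printf("mismatch: %d\n", p_mismatch(b, 3)); /* 1: mismatch */
	return 0;
}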
3769 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
3773 int pd_idx = sh->pd_idx; in handle_parity_checks6()
3774 int qd_idx = sh->qd_idx; in handle_parity_checks6()
3777 BUG_ON(sh->batch_head); in handle_parity_checks6()
3778 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
3788 switch (sh->check_state) { in handle_parity_checks6()
3796 sh->check_state = check_state_run; in handle_parity_checks6()
3802 if (sh->check_state == check_state_run) in handle_parity_checks6()
3803 sh->check_state = check_state_run_pq; in handle_parity_checks6()
3805 sh->check_state = check_state_run_q; in handle_parity_checks6()
3809 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
3811 if (sh->check_state == check_state_run) { in handle_parity_checks6()
3813 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
3816 if (sh->check_state >= check_state_run && in handle_parity_checks6()
3817 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
3829 sh->check_state = check_state_idle; in handle_parity_checks6()
3832 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
3840 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
3846 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
3851 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
3852 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
3857 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
3858 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
3863 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks6()
3865 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
3872 sh->check_state = check_state_idle; in handle_parity_checks6()
3878 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
3881 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
3887 sh->check_state = check_state_compute_result; in handle_parity_checks6()
3898 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
3900 int *target = &sh->ops.target; in handle_parity_checks6()
3902 sh->ops.target = -1; in handle_parity_checks6()
3903 sh->ops.target2 = -1; in handle_parity_checks6()
3904 sh->check_state = check_state_compute_run; in handle_parity_checks6()
3905 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
3907 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
3909 &sh->dev[pd_idx].flags); in handle_parity_checks6()
3911 target = &sh->ops.target2; in handle_parity_checks6()
3914 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
3916 &sh->dev[qd_idx].flags); in handle_parity_checks6()
3927 __func__, sh->check_state, in handle_parity_checks6()
3928 (unsigned long long) sh->sector); in handle_parity_checks6()
3933 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
3941 BUG_ON(sh->batch_head); in handle_stripe_expansion()
3942 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
3943 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
3944 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
3949 sector_t bn = compute_blocknr(sh, i, 1); in handle_stripe_expansion()
3969 sh->dev[i].page, 0, 0, STRIPE_SIZE, in handle_stripe_expansion()
4004 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
4006 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
4007 int disks = sh->disks; in analyse_stripe()
4014 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; in analyse_stripe()
4015 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; in analyse_stripe()
4027 dev = &sh->dev[i]; in analyse_stripe()
4038 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
4067 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && in analyse_stripe()
4068 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4080 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4107 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) in analyse_stripe()
4166 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
4176 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4185 static int clear_batch_ready(struct stripe_head *sh) in clear_batch_ready() argument
4192 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) in clear_batch_ready()
4193 return (sh->batch_head && sh->batch_head != sh); in clear_batch_ready()
4194 spin_lock(&sh->stripe_lock); in clear_batch_ready()
4195 if (!sh->batch_head) { in clear_batch_ready()
4196 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4204 if (sh->batch_head != sh) { in clear_batch_ready()
4205 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4208 spin_lock(&sh->batch_lock); in clear_batch_ready()
4209 list_for_each_entry(tmp, &sh->batch_list, batch_list) in clear_batch_ready()
4211 spin_unlock(&sh->batch_lock); in clear_batch_ready()
4212 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4224 struct stripe_head *sh, *next; in break_stripe_batch_list() local
4228 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { in break_stripe_batch_list()
4230 list_del_init(&sh->batch_list); in break_stripe_batch_list()
4232 WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | in break_stripe_batch_list()
4248 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | in break_stripe_batch_list()
4253 sh->check_state = head_sh->check_state; in break_stripe_batch_list()
4254 sh->reconstruct_state = head_sh->reconstruct_state; in break_stripe_batch_list()
4255 for (i = 0; i < sh->disks; i++) { in break_stripe_batch_list()
4256 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in break_stripe_batch_list()
4258 sh->dev[i].flags = head_sh->dev[i].flags & in break_stripe_batch_list()
4261 spin_lock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4262 sh->batch_head = NULL; in break_stripe_batch_list()
4263 spin_unlock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4265 sh->state & handle_flags) in break_stripe_batch_list()
4266 set_bit(STRIPE_HANDLE, &sh->state); in break_stripe_batch_list()
4267 release_stripe(sh); in break_stripe_batch_list()
4282 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
4285 struct r5conf *conf = sh->raid_conf; in handle_stripe()
4288 int disks = sh->disks; in handle_stripe()
4291 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4292 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
4295 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4299 if (clear_batch_ready(sh) ) { in handle_stripe()
4300 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
4304 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) in handle_stripe()
4305 break_stripe_batch_list(sh, 0); in handle_stripe()
4307 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { in handle_stripe()
4308 spin_lock(&sh->stripe_lock); in handle_stripe()
4310 if (!test_bit(STRIPE_DISCARD, &sh->state) && in handle_stripe()
4311 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
4312 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4313 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4314 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4316 spin_unlock(&sh->stripe_lock); in handle_stripe()
4318 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4322 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4323 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4324 sh->check_state, sh->reconstruct_state); in handle_stripe()
4326 analyse_stripe(sh, &s); in handle_stripe()
4329 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4336 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4344 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
4346 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
4357 sh->check_state = 0; in handle_stripe()
4358 sh->reconstruct_state = 0; in handle_stripe()
4359 break_stripe_batch_list(sh, 0); in handle_stripe()
4361 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); in handle_stripe()
4363 handle_failed_sync(conf, sh, &s); in handle_stripe()
4370 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
4372 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
4373 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
4374 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4379 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
4380 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
4381 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
4382 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && in handle_stripe()
4383 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
4385 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
4387 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
4396 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
4398 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4401 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
4409 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
4410 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
4411 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
4412 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
4413 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
4414 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
4426 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); in handle_stripe()
4437 handle_stripe_fill(sh, &s, disks); in handle_stripe()
4445 if (s.to_write && !sh->reconstruct_state && !sh->check_state) in handle_stripe()
4446 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
4453 if (sh->check_state || in handle_stripe()
4455 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
4456 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
4458 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
4460 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
4464 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
4465 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
4468 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
4469 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
4470 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
4471 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
4475 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4476 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4479 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
4480 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
4482 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4483 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
4492 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
4512 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
4514 = get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
4519 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4520 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4530 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4531 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
4533 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
4534 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
4539 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
4540 !sh->reconstruct_state) { in handle_stripe()
4542 sh->disks = conf->raid_disks; in handle_stripe()
4543 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4544 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
4545 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
4546 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
4553 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
4554 handle_stripe_expansion(conf, sh); in handle_stripe()
4574 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
4578 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
4585 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
4594 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
4601 raid_run_ops(sh, s.ops_request); in handle_stripe()
4603 ops_run_io(sh, &s); in handle_stripe()
4618 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
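handle_stripe() brackets all of the above with test_and_set_bit_lock(STRIPE_ACTIVE, ...) on entry and clear_bit_unlock() on exit: the bit acts as a try-lock, and a stripe already being handled is simply re-marked with STRIPE_HANDLE for another pass instead of blocking the caller. A minimal model of that try-lock-or-requeue idiom (illustrative names, C11 atomics in place of the kernel's locked bitops; the flag would need ATOMIC_FLAG_INIT before first use):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct stripe {
        atomic_flag active;     /* STRIPE_ACTIVE as a try-lock */
        atomic_bool handle;     /* STRIPE_HANDLE: "needs another pass" */
    };

    static void model_handle_stripe(struct stripe *sh)
    {
        atomic_store(&sh->handle, false);
        if (atomic_flag_test_and_set_explicit(&sh->active,
                                              memory_order_acquire)) {
            /* Another context owns the stripe: request a re-run
             * rather than blocking, as the listing does with
             * set_bit(STRIPE_HANDLE, &sh->state). */
            atomic_store(&sh->handle, true);
            return;
        }
        /* ... analyse the stripe and schedule work here ... */
        atomic_flag_clear_explicit(&sh->active, memory_order_release);
    }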
4626 struct stripe_head *sh; in raid5_activate_delayed() local
4627 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
4629 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
4630 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
4632 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
4633 raid5_wakeup_stripe_thread(sh); in raid5_activate_delayed()
4646 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
4648 list_del_init(&sh->lru); in activate_bit_delay()
4649 atomic_inc(&sh->count); in activate_bit_delay()
4650 hash = sh->hash_lock_index; in activate_bit_delay()
4651 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
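activate_bit_delay() pins each stripe with atomic_inc(&sh->count) before passing it to __release_stripe(), the usual take-a-reference-across-a-handoff discipline: the put inside the release path is then always balanced. A toy single-process model of that pin-then-put pattern (illustrative names):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct stripe { atomic_int count; int id; };

    static void put_stripe(struct stripe *sh)
    {
        /* Last reference gone: in the driver the stripe would move
         * to an inactive list; the model just frees it. */
        if (atomic_fetch_sub(&sh->count, 1) == 1) {
            printf("stripe %d released\n", sh->id);
            free(sh);
        }
    }

    static void move_stripe(struct stripe *sh)
    {
        atomic_fetch_add(&sh->count, 1);  /* pin across the handoff */
        /* ... unlink from one list, queue on another ... */
        put_stripe(sh);                   /* drop the pin */
    }

    int main(void)
    {
        struct stripe *sh = malloc(sizeof(*sh));
        atomic_init(&sh->count, 1);
        sh->id = 7;
        move_stripe(sh);   /* survives: main still holds a reference */
        put_stripe(sh);    /* final put frees it */
        return 0;
    }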
4908 struct stripe_head *sh = NULL, *tmp; in __get_priority_stripe() local
4934 sh = list_entry(handle_list->next, typeof(*sh), lru); in __get_priority_stripe()
4938 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
4958 sh = tmp; in __get_priority_stripe()
4963 if (sh) { in __get_priority_stripe()
4971 if (!sh) in __get_priority_stripe()
4976 sh->group = NULL; in __get_priority_stripe()
4978 list_del_init(&sh->lru); in __get_priority_stripe()
4979 BUG_ON(atomic_inc_return(&sh->count) != 1); in __get_priority_stripe()
4980 return sh; in __get_priority_stripe()
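__get_priority_stripe() ends by detaching the chosen stripe with list_del_init() and asserting, via BUG_ON(atomic_inc_return(&sh->count) != 1), that a stripe parked on a scheduling list carries no other references. A stripped-down sketch of that pop-and-assert shape (illustrative names, single-threaded; the real selector also weighs a hold list and per-group state not shown in the fragment):

    #include <assert.h>
    #include <stddef.h>

    struct stripe {
        struct stripe *next;   /* models the lru list linkage */
        int count;             /* single-threaded stand-in for sh->count */
    };

    static struct stripe *pick_stripe(struct stripe **list)
    {
        struct stripe *sh = *list;

        if (!sh)
            return NULL;
        *list = sh->next;
        sh->next = NULL;       /* models list_del_init(&sh->lru) */
        /* Idle stripes on a scheduling list hold no references, so
         * taking the first one must move the count from 0 to 1. */
        sh->count++;
        assert(sh->count == 1);
        return sh;
    }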
4993 struct stripe_head *sh; in raid5_unplug() local
5002 sh = list_first_entry(&cb->list, struct stripe_head, lru); in raid5_unplug()
5003 list_del_init(&sh->lru); in raid5_unplug()
5010 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); in raid5_unplug()
5015 hash = sh->hash_lock_index; in raid5_unplug()
5016 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5029 struct stripe_head *sh) in release_stripe_plug() argument
5037 release_stripe(sh); in release_stripe_plug()
5050 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) in release_stripe_plug()
5051 list_add_tail(&sh->lru, &cb->list); in release_stripe_plug()
5053 release_stripe(sh); in release_stripe_plug()
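release_stripe_plug() and raid5_unplug() form a deferral pair: a stripe whose STRIPE_ON_UNPLUG_LIST bit was clear is parked on the plug callback's list, anything already queued is released immediately, and the unplug pass drains the list while clearing the bit. A small userspace model of that add-or-release/flush pattern (illustrative names; the per-hash temp_inactive_list batching visible above is omitted here):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct stripe {
        struct stripe *next;
        bool on_unplug_list;   /* models STRIPE_ON_UNPLUG_LIST */
        int id;
    };

    struct plug { struct stripe *list; };

    static void release_now(struct stripe *sh)
    {
        printf("release stripe %d\n", sh->id);
    }

    /* Park the stripe on the plug for a batched release; if it is
     * already queued (bit set), release it immediately, mirroring
     * the test_and_set_bit() branch in the listing. */
    static void release_plugged(struct plug *pl, struct stripe *sh)
    {
        if (!sh->on_unplug_list) {
            sh->on_unplug_list = true;
            sh->next = pl->list;
            pl->list = sh;
        } else {
            release_now(sh);
        }
    }

    static void unplug(struct plug *pl)
    {
        while (pl->list) {
            struct stripe *sh = pl->list;
            pl->list = sh->next;
            sh->next = NULL;
            sh->on_unplug_list = false;   /* clear before releasing */
            release_now(sh);
        }
    }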
5060 struct stripe_head *sh; in make_discard_request() local
5088 sh = get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5091 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5092 if (test_bit(STRIPE_SYNCING, &sh->state)) { in make_discard_request()
5093 release_stripe(sh); in make_discard_request()
5097 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5098 spin_lock_irq(&sh->stripe_lock); in make_discard_request()
5100 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5102 if (sh->dev[d].towrite || sh->dev[d].toread) { in make_discard_request()
5103 set_bit(R5_Overlap, &sh->dev[d].flags); in make_discard_request()
5104 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5105 release_stripe(sh); in make_discard_request()
5110 set_bit(STRIPE_DISCARD, &sh->state); in make_discard_request()
5112 sh->overwrite_disks = 0; in make_discard_request()
5114 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5116 sh->dev[d].towrite = bi; in make_discard_request()
5117 set_bit(R5_OVERWRITE, &sh->dev[d].flags); in make_discard_request()
5119 sh->overwrite_disks++; in make_discard_request()
5121 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5127 sh->sector, in make_discard_request()
5130 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5131 set_bit(STRIPE_BIT_DELAY, &sh->state); in make_discard_request()
5134 set_bit(STRIPE_HANDLE, &sh->state); in make_discard_request()
5135 clear_bit(STRIPE_DELAYED, &sh->state); in make_discard_request()
5136 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_discard_request()
5138 release_stripe_plug(mddev, sh); in make_discard_request()
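The discard path above is two-phase under stripe_lock: first every data slot is checked for attached IO (bailing out, marking R5_Overlap, and retrying if any is found), and only then is the bio installed as towrite on each non-parity slot with R5_OVERWRITE set and overwrite_disks counted. A compact model of that check-then-claim loop (illustrative names and disk count; the locking and the retry/wait are left to the caller):

    #include <stdbool.h>
    #include <stddef.h>

    #define NDISKS 6

    struct dev { void *towrite, *toread; bool overwrite; };

    struct stripe {
        struct dev dev[NDISKS];
        int pd_idx, qd_idx;       /* parity slots are never discarded */
        int overwrite_disks;
    };

    /* Try to claim every data disk of the stripe for a full-stripe
     * discard 'bi'. If any disk still has IO attached, back out so
     * the caller can wait and retry (the listing sets R5_Overlap
     * for that; the model just reports failure). */
    static bool claim_stripe_for_discard(struct stripe *sh, void *bi)
    {
        for (int d = 0; d < NDISKS; d++) {
            if (d == sh->pd_idx || d == sh->qd_idx)
                continue;
            if (sh->dev[d].towrite || sh->dev[d].toread)
                return false;             /* conflicting IO pending */
        }
        sh->overwrite_disks = 0;
        for (int d = 0; d < NDISKS; d++) {
            if (d == sh->pd_idx || d == sh->qd_idx)
                continue;
            sh->dev[d].towrite = bi;      /* whole chunk overwritten */
            sh->dev[d].overwrite = true;  /* models R5_OVERWRITE */
            sh->overwrite_disks++;
        }
        return true;
    }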
5154 struct stripe_head *sh; in make_request() local
5233 sh = get_active_stripe(conf, new_sector, previous, in make_request()
5235 if (sh) { in make_request()
5254 release_stripe(sh); in make_request()
5264 release_stripe(sh); in make_request()
5271 release_stripe(sh); in make_request()
5287 if (test_bit(STRIPE_EXPANDING, &sh->state) || in make_request()
5288 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { in make_request()
5294 release_stripe(sh); in make_request()
5299 set_bit(STRIPE_HANDLE, &sh->state); in make_request()
5300 clear_bit(STRIPE_DELAYED, &sh->state); in make_request()
5301 if ((!sh->batch_head || sh == sh->batch_head) && in make_request()
5303 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_request()
5305 release_stripe_plug(mddev, sh); in make_request()
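make_request() follows the same busy-back-off convention: a stripe that is mid-expansion, or for which add_stripe_bio() refuses the bio because of an overlap, is released so the submitter can wait and retry; only a successful attach leads to STRIPE_HANDLE and a plugged release. A self-contained sketch of that attach-or-retry decision (illustrative names; the wait queue the driver sleeps on between attempts is left out):

    #include <stdbool.h>
    #include <stddef.h>

    struct stripe { bool expanding; void *towrite; };

    /* Models add_stripe_bio()'s failure mode: refuse when another
     * bio already occupies the slot we would overlap. */
    static bool attach_bio(struct stripe *sh, void *bi)
    {
        if (sh->towrite)
            return false;
        sh->towrite = bi;
        return true;
    }

    /* The shape around the listing: a stripe that is mid-expansion
     * or already carrying an overlapping bio is put back and the
     * submitter retries after waiting; a successful attach means
     * the caller marks the stripe for handling and releases it. */
    static bool try_submit(struct stripe *sh, void *bi)
    {
        if (sh->expanding || !attach_bio(sh, bi))
            return false;    /* caller releases, waits, retries */
        return true;         /* caller sets STRIPE_HANDLE, releases */
    }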
5340 struct stripe_head *sh; in reshape_request() local
5472 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
5473 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
5478 for (j = sh->disks; j--;) { in reshape_request()
5480 if (j == sh->pd_idx) in reshape_request()
5483 j == sh->qd_idx) in reshape_request()
5485 s = compute_blocknr(sh, j, 0); in reshape_request()
5490 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); in reshape_request()
5491 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
5492 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
5495 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
5496 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
5498 list_add(&sh->lru, &stripes); in reshape_request()
5521 sh = get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
5522 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
5523 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
5524 release_stripe(sh); in reshape_request()
5531 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
5532 list_del_init(&sh->lru); in reshape_request()
5533 release_stripe(sh); in reshape_request()
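Note the ordering trick in reshape_request(): freshly built destination stripes are fully initialized (zeroed pages, R5_Expanded and R5_UPTODATE) but parked on the local stripes list, and are only released for handling after the corresponding source stripes have been queued. A tiny runnable model of that collect-then-drain pattern (illustrative names):

    #include <stddef.h>
    #include <stdio.h>

    struct stripe { struct stripe *next; long sector; };

    /* Hand parked destination stripes to the handling machinery only
     * once the caller has queued all the matching source stripes,
     * mirroring the listing's list_add(&sh->lru, &stripes) followed
     * by a drain loop. */
    static void drain(struct stripe **stripes)
    {
        while (*stripes) {
            struct stripe *sh = *stripes;
            *stripes = sh->next;
            sh->next = NULL;
            printf("release destination stripe %ld\n", sh->sector);
        }
    }

    int main(void)
    {
        struct stripe a = { NULL, 128 }, b = { &a, 64 };
        struct stripe *stripes = &b;
        /* ... queue the matching source stripes here ... */
        drain(&stripes);
        return 0;
    }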
5570 struct stripe_head *sh; in sync_request() local
5628 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); in sync_request()
5629 if (sh == NULL) { in sync_request()
5630 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); in sync_request()
5651 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in sync_request()
5652 set_bit(STRIPE_HANDLE, &sh->state); in sync_request()
5654 release_stripe(sh); in sync_request()
5671 struct stripe_head *sh; in retry_aligned_read() local
5693 sh = get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
5695 if (!sh) { in retry_aligned_read()
5702 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
5703 release_stripe(sh); in retry_aligned_read()
5709 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()
5710 handle_stripe(sh); in retry_aligned_read()
5711 release_stripe(sh); in retry_aligned_read()
5729 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
5734 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
5735 batch[batch_size++] = sh; in handle_active_stripes()
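handle_active_stripes() drains work in fixed-size chunks: fill batch[] with up to MAX_STRIPE_BATCH stripes from the priority selector, then handle them as a group, which keeps queue locking off the per-stripe fast path. A standalone sketch of that fixed-size batching loop (illustrative names; the batch size here is arbitrary, and the lock management the driver interleaves with this loop is not modelled):

    #include <stddef.h>

    #define MODEL_BATCH 8   /* stands in for MAX_STRIPE_BATCH */

    struct stripe { struct stripe *next; };

    static struct stripe *pop(struct stripe **q)
    {
        struct stripe *sh = *q;

        if (sh) {
            *q = sh->next;
            sh->next = NULL;
        }
        return sh;
    }

    static void handle(struct stripe *sh) { (void)sh; /* do the work */ }

    /* Fill batch[] with up to MODEL_BATCH stripes, then handle them
     * as a group, mirroring the listing's fill loop. Returns the
     * batch size; 0 means the queue is empty. */
    static int handle_batch(struct stripe **q)
    {
        struct stripe *batch[MODEL_BATCH], *sh;
        int n = 0;

        while (n < MODEL_BATCH && (sh = pop(q)) != NULL)
            batch[n++] = sh;
        for (int i = 0; i < n; i++)
            handle(batch[i]);
        return n;
    }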