Lines matching references to `sh`
188 static inline int raid6_d0(struct stripe_head *sh) in raid6_d0() argument
190 if (sh->ddf_layout) in raid6_d0()
194 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
197 return sh->qd_idx + 1; in raid6_d0()
210 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
215 if (sh->ddf_layout) in raid6_idx_to_slot()
217 if (idx == sh->pd_idx) in raid6_idx_to_slot()
219 if (idx == sh->qd_idx) in raid6_idx_to_slot()
221 if (!sh->ddf_layout) in raid6_idx_to_slot()
239 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
241 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
242 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
243 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
246 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) in raid5_wakeup_stripe_thread() argument
248 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread()
251 int i, cpu = sh->cpu; in raid5_wakeup_stripe_thread()
255 sh->cpu = cpu; in raid5_wakeup_stripe_thread()
258 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
261 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
263 sh->group = group; in raid5_wakeup_stripe_thread()
271 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
275 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
282 queue_work_on(sh->cpu, raid5_wq, in raid5_wakeup_stripe_thread()
289 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
292 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
294 if (test_bit(STRIPE_HANDLE, &sh->state)) { in do_release_stripe()
295 if (test_bit(STRIPE_DELAYED, &sh->state) && in do_release_stripe()
296 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
297 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
298 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in do_release_stripe()
299 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
300 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
302 clear_bit(STRIPE_DELAYED, &sh->state); in do_release_stripe()
303 clear_bit(STRIPE_BIT_DELAY, &sh->state); in do_release_stripe()
305 list_add_tail(&sh->lru, &conf->handle_list); in do_release_stripe()
307 raid5_wakeup_stripe_thread(sh); in do_release_stripe()
313 BUG_ON(stripe_operations_active(sh)); in do_release_stripe()
314 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
319 if (!test_bit(STRIPE_EXPANDING, &sh->state)) in do_release_stripe()
320 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
324 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
327 if (atomic_dec_and_test(&sh->count)) in __release_stripe()
328 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
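
The pair above frees the stripe only when atomic_dec_and_test() drops the reference count to zero. A minimal userspace sketch of that last-reference pattern (C11 atomics; the struct and function names are made up for illustration and are not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int count;          /* reference count, starts > 0 */
        unsigned long long sector; /* stand-in for the real per-stripe state */
    };

    /* Analogous to __release_stripe(): only the caller that drops the
     * last reference performs the actual release work. */
    static void obj_put(struct obj *o)
    {
        /* atomic_fetch_sub returns the previous value: 1 means last reference */
        if (atomic_fetch_sub(&o->count, 1) == 1) {
            printf("releasing object at sector %llu\n", o->sector);
            free(o);
        }
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->count, 2);
        o->sector = 4096;
        obj_put(o);    /* 2 -> 1: nothing released */
        obj_put(o);    /* 1 -> 0: object freed     */
        return 0;
    }
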
384 struct stripe_head *sh; in release_stripe_list() local
393 sh = llist_entry(head, struct stripe_head, release_list); in release_stripe_list()
397 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); in release_stripe_list()
403 hash = sh->hash_lock_index; in release_stripe_list()
404 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
411 void raid5_release_stripe(struct stripe_head *sh) in raid5_release_stripe() argument
413 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe()
421 if (atomic_add_unless(&sh->count, -1, 1)) in raid5_release_stripe()
425 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) in raid5_release_stripe()
427 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
434 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { in raid5_release_stripe()
436 hash = sh->hash_lock_index; in raid5_release_stripe()
437 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
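
raid5_release_stripe() first tries atomic_add_unless(&sh->count, -1, 1): a lock-free decrement that refuses to drop the final reference, so only a potential last put has to take device_lock via atomic_dec_and_lock(). A rough C11 equivalent of that fast path (hypothetical helper name, not the kernel primitive):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Decrement *v unless it currently equals 1; returns true if the
     * decrement happened.  Mirrors atomic_add_unless(v, -1, 1): non-final
     * references are dropped without a lock, while a potential last
     * reference makes the caller fall back to the locked slow path. */
    static bool dec_unless_last(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old != 1) {
            if (atomic_compare_exchange_weak(v, &old, old - 1))
                return true;   /* dropped a non-final reference */
            /* CAS failure reloaded 'old'; retry with the fresh value */
        }
        return false;          /* count is 1: caller must take the lock */
    }

    int main(void)
    {
        atomic_int count = 2;

        printf("fast path taken: %d\n", dec_unless_last(&count)); /* 1 */
        printf("fast path taken: %d\n", dec_unless_last(&count)); /* 0 */
        return 0;
    }
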
444 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
447 (unsigned long long)sh->sector); in remove_hash()
449 hlist_del_init(&sh->hash); in remove_hash()
452 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
454 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
457 (unsigned long long)sh->sector); in insert_hash()
459 hlist_add_head(&sh->hash, hp); in insert_hash()
465 struct stripe_head *sh = NULL; in get_free_stripe() local
471 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
473 remove_hash(sh); in get_free_stripe()
475 BUG_ON(hash != sh->hash_lock_index); in get_free_stripe()
479 return sh; in get_free_stripe()
482 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
486 int num = sh->raid_conf->pool_size; in shrink_buffers()
489 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); in shrink_buffers()
490 p = sh->dev[i].page; in shrink_buffers()
493 sh->dev[i].page = NULL; in shrink_buffers()
498 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) in grow_buffers() argument
501 int num = sh->raid_conf->pool_size; in grow_buffers()
509 sh->dev[i].page = page; in grow_buffers()
510 sh->dev[i].orig_page = page; in grow_buffers()
515 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
517 struct stripe_head *sh);
519 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
521 struct r5conf *conf = sh->raid_conf; in init_stripe()
524 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
525 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
526 BUG_ON(stripe_operations_active(sh)); in init_stripe()
527 BUG_ON(sh->batch_head); in init_stripe()
533 sh->generation = conf->generation - previous; in init_stripe()
534 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
535 sh->sector = sector; in init_stripe()
536 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
537 sh->state = 0; in init_stripe()
539 for (i = sh->disks; i--; ) { in init_stripe()
540 struct r5dev *dev = &sh->dev[i]; in init_stripe()
545 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
551 raid5_build_block(sh, i, previous); in init_stripe()
555 sh->overwrite_disks = 0; in init_stripe()
556 insert_hash(conf, sh); in init_stripe()
557 sh->cpu = smp_processor_id(); in init_stripe()
558 set_bit(STRIPE_BATCH_READY, &sh->state); in init_stripe()
564 struct stripe_head *sh; in __find_stripe() local
567 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
568 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
569 return sh; in __find_stripe()
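
insert_hash(), remove_hash() and __find_stripe() above are a plain bucket hash keyed by stripe sector (the real lookup also matches sh->generation). A self-contained sketch of the same scheme; the modulo hash and all names are illustrative only, not stripe_hash():

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_BUCKETS 256

    struct stripe {
        struct stripe *next;        /* bucket chain, like the hlist linkage */
        unsigned long long sector;  /* lookup key, like sh->sector */
    };

    static struct stripe *table[NR_BUCKETS];

    static unsigned int bucket_of(unsigned long long sector)
    {
        return (unsigned int)(sector % NR_BUCKETS);
    }

    static void insert_stripe(struct stripe *s)
    {
        unsigned int h = bucket_of(s->sector);

        s->next = table[h];         /* push onto the head of the bucket */
        table[h] = s;
    }

    static struct stripe *find_stripe(unsigned long long sector)
    {
        struct stripe *s;

        for (s = table[bucket_of(sector)]; s; s = s->next)
            if (s->sector == sector)
                return s;
        return NULL;                /* caller allocates and initialises a new one */
    }

    int main(void)
    {
        struct stripe *s = calloc(1, sizeof(*s));

        s->sector = 12345;
        insert_stripe(s);
        printf("hit: %s\n", find_stripe(12345) ? "yes" : "no");
        free(s);
        return 0;
    }
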
660 struct stripe_head *sh; in raid5_get_active_stripe() local
671 sh = __find_stripe(conf, sector, conf->generation - previous); in raid5_get_active_stripe()
672 if (!sh) { in raid5_get_active_stripe()
674 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
675 if (!sh && !test_bit(R5_DID_ALLOC, in raid5_get_active_stripe()
680 if (noblock && sh == NULL) in raid5_get_active_stripe()
682 if (!sh) { in raid5_get_active_stripe()
696 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
697 atomic_inc(&sh->count); in raid5_get_active_stripe()
699 } else if (!atomic_inc_not_zero(&sh->count)) { in raid5_get_active_stripe()
701 if (!atomic_read(&sh->count)) { in raid5_get_active_stripe()
702 if (!test_bit(STRIPE_HANDLE, &sh->state)) in raid5_get_active_stripe()
704 BUG_ON(list_empty(&sh->lru) && in raid5_get_active_stripe()
705 !test_bit(STRIPE_EXPANDING, &sh->state)); in raid5_get_active_stripe()
706 list_del_init(&sh->lru); in raid5_get_active_stripe()
707 if (sh->group) { in raid5_get_active_stripe()
708 sh->group->stripes_cnt--; in raid5_get_active_stripe()
709 sh->group = NULL; in raid5_get_active_stripe()
712 atomic_inc(&sh->count); in raid5_get_active_stripe()
715 } while (sh == NULL); in raid5_get_active_stripe()
718 return sh; in raid5_get_active_stripe()
721 static bool is_full_stripe_write(struct stripe_head *sh) in is_full_stripe_write() argument
723 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); in is_full_stripe_write()
724 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); in is_full_stripe_write()
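
A worked example of the test above (array geometry assumed for illustration): in a 6-device RAID-6 set, max_degraded is 2, so overwrite_disks must reach 4, meaning every data block in the stripe is being rewritten, before this counts as a full-stripe write; such stripes can be completed without pre-reading old data or parity, which is also why stripe_can_batch() below requires it.
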
747 static bool stripe_can_batch(struct stripe_head *sh) in stripe_can_batch() argument
749 struct r5conf *conf = sh->raid_conf; in stripe_can_batch()
753 return test_bit(STRIPE_BATCH_READY, &sh->state) && in stripe_can_batch()
754 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && in stripe_can_batch()
755 is_full_stripe_write(sh); in stripe_can_batch()
759 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) in stripe_add_to_batch_list() argument
766 if (!stripe_can_batch(sh)) in stripe_add_to_batch_list()
769 tmp_sec = sh->sector; in stripe_add_to_batch_list()
772 head_sector = sh->sector - STRIPE_SECTORS; in stripe_add_to_batch_list()
800 lock_two_stripes(head, sh); in stripe_add_to_batch_list()
802 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) in stripe_add_to_batch_list()
805 if (sh->batch_head) in stripe_add_to_batch_list()
809 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
811 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) in stripe_add_to_batch_list()
826 list_add(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
829 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
832 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
834 list_add_tail(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
838 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in stripe_add_to_batch_list()
843 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { in stripe_add_to_batch_list()
844 int seq = sh->bm_seq; in stripe_add_to_batch_list()
845 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && in stripe_add_to_batch_list()
846 sh->batch_head->bm_seq > seq) in stripe_add_to_batch_list()
847 seq = sh->batch_head->bm_seq; in stripe_add_to_batch_list()
848 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); in stripe_add_to_batch_list()
849 sh->batch_head->bm_seq = seq; in stripe_add_to_batch_list()
852 atomic_inc(&sh->count); in stripe_add_to_batch_list()
854 unlock_two_stripes(head, sh); in stripe_add_to_batch_list()
862 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
872 if (sh->generation == conf->generation - 1) in use_new_offset()
885 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
887 struct r5conf *conf = sh->raid_conf; in ops_run_io()
888 int i, disks = sh->disks; in ops_run_io()
889 struct stripe_head *head_sh = sh; in ops_run_io()
893 if (r5l_write_stripe(conf->log, sh) == 0) in ops_run_io()
901 sh = head_sh; in ops_run_io()
902 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
903 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
907 if (test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_io()
909 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
912 &sh->dev[i].flags)) { in ops_run_io()
917 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) in ops_run_io()
921 bi = &sh->dev[i].req; in ops_run_io()
922 rbi = &sh->dev[i].rreq; /* For writing to replacement */ in ops_run_io()
962 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in ops_run_io()
996 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1004 bi->bi_private = sh; in ops_run_io()
1007 __func__, (unsigned long long)sh->sector, in ops_run_io()
1009 atomic_inc(&sh->count); in ops_run_io()
1010 if (sh != head_sh) in ops_run_io()
1012 if (use_new_offset(conf, sh)) in ops_run_io()
1013 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1016 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1021 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1022 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1023 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1035 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
1040 sh->dev[i].sector); in ops_run_io()
1048 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1055 rbi->bi_private = sh; in ops_run_io()
1059 __func__, (unsigned long long)sh->sector, in ops_run_io()
1061 atomic_inc(&sh->count); in ops_run_io()
1062 if (sh != head_sh) in ops_run_io()
1064 if (use_new_offset(conf, sh)) in ops_run_io()
1065 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1068 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1070 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1071 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1072 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1086 sh->dev[i].sector); in ops_run_io()
1091 set_bit(STRIPE_DEGRADED, &sh->state); in ops_run_io()
1093 bi->bi_rw, i, (unsigned long long)sh->sector); in ops_run_io()
1094 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
1095 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
1100 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_io()
1102 if (sh != head_sh) in ops_run_io()
1110 struct stripe_head *sh) in async_copy_data() argument
1148 if (sh->raid_conf->skip_copy && in async_copy_data()
1172 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
1177 (unsigned long long)sh->sector); in ops_complete_biofill()
1180 for (i = sh->disks; i--; ) { in ops_complete_biofill()
1181 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
1203 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
1207 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
1208 raid5_release_stripe(sh); in ops_complete_biofill()
1211 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
1217 BUG_ON(sh->batch_head); in ops_run_biofill()
1219 (unsigned long long)sh->sector); in ops_run_biofill()
1221 for (i = sh->disks; i--; ) { in ops_run_biofill()
1222 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
1225 spin_lock_irq(&sh->stripe_lock); in ops_run_biofill()
1228 spin_unlock_irq(&sh->stripe_lock); in ops_run_biofill()
1232 dev->sector, tx, sh); in ops_run_biofill()
1238 atomic_inc(&sh->count); in ops_run_biofill()
1239 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
1243 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
1250 tgt = &sh->dev[target]; in mark_target_uptodate()
1258 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
1261 (unsigned long long)sh->sector); in ops_complete_compute()
1264 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
1265 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
1267 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
1268 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
1269 sh->check_state = check_state_compute_result; in ops_complete_compute()
1270 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
1271 raid5_release_stripe(sh); in ops_complete_compute()
1275 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
1281 return addr + sizeof(struct page *) * (sh->disks + 2); in to_addr_conv()
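
Inferred from the arithmetic above, not spelled out in the listing: each per-CPU scribble region begins with room for disks + 2 page pointers handed to the async_tx routines, and to_addr_conv() returns the addr_conv_t area that follows. For example, with 8 devices on a 64-bit build the returned pointer sits 10 * sizeof(struct page *) = 80 bytes into the region.
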
1294 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1296 int disks = sh->disks; in ops_run_compute5()
1298 int target = sh->ops.target; in ops_run_compute5()
1299 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
1306 BUG_ON(sh->batch_head); in ops_run_compute5()
1309 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1314 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
1316 atomic_inc(&sh->count); in ops_run_compute5()
1319 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1338 struct stripe_head *sh, in set_syndrome_sources() argument
1341 int disks = sh->disks; in set_syndrome_sources()
1342 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
1343 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
1353 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
1354 struct r5dev *dev = &sh->dev[i]; in set_syndrome_sources()
1356 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1362 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
1370 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1372 int disks = sh->disks; in ops_run_compute6_1()
1375 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
1383 BUG_ON(sh->batch_head); in ops_run_compute6_1()
1384 if (sh->ops.target < 0) in ops_run_compute6_1()
1385 target = sh->ops.target2; in ops_run_compute6_1()
1386 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
1387 target = sh->ops.target; in ops_run_compute6_1()
1393 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1395 tgt = &sh->dev[target]; in ops_run_compute6_1()
1399 atomic_inc(&sh->count); in ops_run_compute6_1()
1402 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); in ops_run_compute6_1()
1406 ops_complete_compute, sh, in ops_run_compute6_1()
1407 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1415 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
1419 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
1420 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1428 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1430 int i, count, disks = sh->disks; in ops_run_compute6_2()
1431 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
1432 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
1434 int target = sh->ops.target; in ops_run_compute6_2()
1435 int target2 = sh->ops.target2; in ops_run_compute6_2()
1436 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
1437 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
1442 BUG_ON(sh->batch_head); in ops_run_compute6_2()
1444 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1457 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
1459 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1472 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1474 atomic_inc(&sh->count); in ops_run_compute6_2()
1481 ops_complete_compute, sh, in ops_run_compute6_2()
1482 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1488 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1500 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1502 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1506 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1510 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); in ops_run_compute6_2()
1512 ops_complete_compute, sh, in ops_run_compute6_2()
1513 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1519 ops_complete_compute, sh, in ops_run_compute6_2()
1520 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1537 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1540 (unsigned long long)sh->sector); in ops_complete_prexor()
1544 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1547 int disks = sh->disks; in ops_run_prexor5()
1549 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5()
1553 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1555 BUG_ON(sh->batch_head); in ops_run_prexor5()
1557 (unsigned long long)sh->sector); in ops_run_prexor5()
1560 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor5()
1567 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1574 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1582 (unsigned long long)sh->sector); in ops_run_prexor6()
1584 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); in ops_run_prexor6()
1587 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
1594 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1596 int disks = sh->disks; in ops_run_biodrain()
1598 struct stripe_head *head_sh = sh; in ops_run_biodrain()
1601 (unsigned long long)sh->sector); in ops_run_biodrain()
1607 sh = head_sh; in ops_run_biodrain()
1612 dev = &sh->dev[i]; in ops_run_biodrain()
1613 spin_lock_irq(&sh->stripe_lock); in ops_run_biodrain()
1616 sh->overwrite_disks = 0; in ops_run_biodrain()
1619 spin_unlock_irq(&sh->stripe_lock); in ops_run_biodrain()
1632 dev->sector, tx, sh); in ops_run_biodrain()
1643 sh = list_first_entry(&sh->batch_list, in ops_run_biodrain()
1646 if (sh == head_sh) in ops_run_biodrain()
1658 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
1659 int disks = sh->disks; in ops_complete_reconstruct()
1660 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
1661 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
1666 (unsigned long long)sh->sector); in ops_complete_reconstruct()
1669 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
1670 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); in ops_complete_reconstruct()
1671 discard |= test_bit(R5_Discard, &sh->dev[i].flags); in ops_complete_reconstruct()
1675 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
1687 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
1688 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
1689 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
1690 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
1692 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
1693 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
1696 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
1697 raid5_release_stripe(sh); in ops_complete_reconstruct()
1701 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
1704 int disks = sh->disks; in ops_run_reconstruct5()
1707 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
1712 struct stripe_head *head_sh = sh; in ops_run_reconstruct5()
1716 (unsigned long long)sh->sector); in ops_run_reconstruct5()
1718 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct5()
1721 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct5()
1724 if (i >= sh->disks) { in ops_run_reconstruct5()
1725 atomic_inc(&sh->count); in ops_run_reconstruct5()
1726 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
1727 ops_complete_reconstruct(sh); in ops_run_reconstruct5()
1738 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1740 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1745 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1747 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1759 list_first_entry(&sh->batch_list, in ops_run_reconstruct5()
1767 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1771 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
1780 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct5()
1787 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
1793 struct stripe_head *head_sh = sh; in ops_run_reconstruct6()
1798 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
1800 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct6()
1801 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
1803 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct6()
1806 if (i >= sh->disks) { in ops_run_reconstruct6()
1807 atomic_inc(&sh->count); in ops_run_reconstruct6()
1808 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
1809 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in ops_run_reconstruct6()
1810 ops_complete_reconstruct(sh); in ops_run_reconstruct6()
1817 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct6()
1825 count = set_syndrome_sources(blocks, sh, synflags); in ops_run_reconstruct6()
1827 list_first_entry(&sh->batch_list, in ops_run_reconstruct6()
1833 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1836 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
1840 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct6()
1848 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
1851 (unsigned long long)sh->sector); in ops_complete_check()
1853 sh->check_state = check_state_check_result; in ops_complete_check()
1854 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
1855 raid5_release_stripe(sh); in ops_complete_check()
1858 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
1860 int disks = sh->disks; in ops_run_check_p()
1861 int pd_idx = sh->pd_idx; in ops_run_check_p()
1862 int qd_idx = sh->qd_idx; in ops_run_check_p()
1871 (unsigned long long)sh->sector); in ops_run_check_p()
1873 BUG_ON(sh->batch_head); in ops_run_check_p()
1875 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
1880 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
1884 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
1886 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
1888 atomic_inc(&sh->count); in ops_run_check_p()
1889 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
1893 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
1900 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
1902 BUG_ON(sh->batch_head); in ops_run_check_pq()
1903 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); in ops_run_check_pq()
1907 atomic_inc(&sh->count); in ops_run_check_pq()
1909 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
1911 &sh->ops.zero_sum_result, percpu->spare_page, &submit); in ops_run_check_pq()
1914 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
1916 int overlap_clear = 0, i, disks = sh->disks; in raid_run_ops()
1918 struct r5conf *conf = sh->raid_conf; in raid_run_ops()
1926 ops_run_biofill(sh); in raid_run_ops()
1932 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
1934 if (sh->ops.target2 < 0 || sh->ops.target < 0) in raid_run_ops()
1935 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
1937 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
1946 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
1948 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
1952 tx = ops_run_biodrain(sh, tx); in raid_run_ops()
1958 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
1960 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
1964 if (sh->check_state == check_state_run) in raid_run_ops()
1965 ops_run_check_p(sh, percpu); in raid_run_ops()
1966 else if (sh->check_state == check_state_run_q) in raid_run_ops()
1967 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
1968 else if (sh->check_state == check_state_run_pq) in raid_run_ops()
1969 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
1974 if (overlap_clear && !sh->batch_head) in raid_run_ops()
1976 struct r5dev *dev = &sh->dev[i]; in raid_run_ops()
1978 wake_up(&sh->raid_conf->wait_for_overlap); in raid_run_ops()
1985 struct stripe_head *sh; in alloc_stripe() local
1987 sh = kmem_cache_zalloc(sc, gfp); in alloc_stripe()
1988 if (sh) { in alloc_stripe()
1989 spin_lock_init(&sh->stripe_lock); in alloc_stripe()
1990 spin_lock_init(&sh->batch_lock); in alloc_stripe()
1991 INIT_LIST_HEAD(&sh->batch_list); in alloc_stripe()
1992 INIT_LIST_HEAD(&sh->lru); in alloc_stripe()
1993 atomic_set(&sh->count, 1); in alloc_stripe()
1995 return sh; in alloc_stripe()
1999 struct stripe_head *sh; in grow_one_stripe() local
2001 sh = alloc_stripe(conf->slab_cache, gfp); in grow_one_stripe()
2002 if (!sh) in grow_one_stripe()
2005 sh->raid_conf = conf; in grow_one_stripe()
2007 if (grow_buffers(sh, gfp)) { in grow_one_stripe()
2008 shrink_buffers(sh); in grow_one_stripe()
2009 kmem_cache_free(conf->slab_cache, sh); in grow_one_stripe()
2012 sh->hash_lock_index = in grow_one_stripe()
2017 raid5_release_stripe(sh); in grow_one_stripe()
2261 struct stripe_head *sh; in drop_one_stripe() local
2265 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2267 if (!sh) in drop_one_stripe()
2269 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
2270 shrink_buffers(sh); in drop_one_stripe()
2271 kmem_cache_free(conf->slab_cache, sh); in drop_one_stripe()
2289 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
2290 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
2291 int disks = sh->disks, i; in raid5_end_read_request()
2297 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2301 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2307 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2317 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2318 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2320 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2322 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2323 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
2336 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2337 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2338 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2339 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2348 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2350 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2367 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { in raid5_end_read_request()
2385 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2388 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { in raid5_end_read_request()
2389 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2390 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2392 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2394 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2395 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2399 rdev, sh->sector, STRIPE_SECTORS, 0))) in raid5_end_read_request()
2404 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
2405 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
2406 raid5_release_stripe(sh); in raid5_end_read_request()
2411 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
2412 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
2413 int disks = sh->disks, i; in raid5_end_write_request()
2420 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2424 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2438 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2448 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2451 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
2454 set_bit(STRIPE_DEGRADED, &sh->state); in raid5_end_write_request()
2456 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
2460 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
2463 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
2464 if (test_bit(R5_ReadError, &sh->dev[i].flags)) in raid5_end_write_request()
2469 set_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_write_request()
2474 if (sh->batch_head && bi->bi_error && !replacement) in raid5_end_write_request()
2475 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); in raid5_end_write_request()
2477 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
2478 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
2479 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
2480 raid5_release_stripe(sh); in raid5_end_write_request()
2482 if (sh->batch_head && sh != sh->batch_head) in raid5_end_write_request()
2483 raid5_release_stripe(sh->batch_head); in raid5_end_write_request()
2486 static void raid5_build_block(struct stripe_head *sh, int i, int previous) in raid5_build_block() argument
2488 struct r5dev *dev = &sh->dev[i]; in raid5_build_block()
2493 dev->req.bi_private = sh; in raid5_build_block()
2498 dev->rreq.bi_private = sh; in raid5_build_block()
2501 dev->sector = raid5_compute_blocknr(sh, i, previous); in raid5_build_block()
2536 struct stripe_head *sh) in raid5_compute_sector() argument
2724 if (sh) { in raid5_compute_sector()
2725 sh->pd_idx = pd_idx; in raid5_compute_sector()
2726 sh->qd_idx = qd_idx; in raid5_compute_sector()
2727 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
2736 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) in raid5_compute_blocknr() argument
2738 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr()
2739 int raid_disks = sh->disks; in raid5_compute_blocknr()
2741 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
2756 if (i == sh->pd_idx) in raid5_compute_blocknr()
2764 if (i > sh->pd_idx) in raid5_compute_blocknr()
2769 if (i < sh->pd_idx) in raid5_compute_blocknr()
2771 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
2783 if (i == sh->qd_idx) in raid5_compute_blocknr()
2790 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
2792 else if (i > sh->pd_idx) in raid5_compute_blocknr()
2797 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
2801 if (i < sh->pd_idx) in raid5_compute_blocknr()
2803 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
2813 if (sh->pd_idx == 0) in raid5_compute_blocknr()
2817 if (i < sh->pd_idx) in raid5_compute_blocknr()
2819 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
2824 if (i > sh->pd_idx) in raid5_compute_blocknr()
2829 if (i < sh->pd_idx) in raid5_compute_blocknr()
2831 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
2847 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
2848 || sh2.qd_idx != sh->qd_idx) { in raid5_compute_blocknr()
2857 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
2860 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction()
2861 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
2867 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
2885 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
2888 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
2893 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
2896 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
2897 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
2899 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || in schedule_reconstruction()
2900 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); in schedule_reconstruction()
2903 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
2919 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
2928 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
2929 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
2933 int qd_idx = sh->qd_idx; in schedule_reconstruction()
2934 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
2942 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
2951 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in add_stripe_bio() argument
2955 struct r5conf *conf = sh->raid_conf; in add_stripe_bio()
2960 (unsigned long long)sh->sector); in add_stripe_bio()
2970 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
2972 if (sh->batch_head) in add_stripe_bio()
2975 bip = &sh->dev[dd_idx].towrite; in add_stripe_bio()
2979 bip = &sh->dev[dd_idx].toread; in add_stripe_bio()
2989 clear_bit(STRIPE_BATCH_READY, &sh->state); in add_stripe_bio()
2999 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio()
3000 for (bi=sh->dev[dd_idx].towrite; in add_stripe_bio()
3001 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && in add_stripe_bio()
3003 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
3007 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) in add_stripe_bio()
3008 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in add_stripe_bio()
3009 sh->overwrite_disks++; in add_stripe_bio()
3014 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
3029 set_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3030 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3031 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
3033 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3034 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); in add_stripe_bio()
3035 if (!sh->batch_head) { in add_stripe_bio()
3036 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
3037 set_bit(STRIPE_BIT_DELAY, &sh->state); in add_stripe_bio()
3040 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3042 if (stripe_can_batch(sh)) in add_stripe_bio()
3043 stripe_add_to_batch_list(conf, sh); in add_stripe_bio()
3047 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3048 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3055 struct stripe_head *sh) in stripe_set_idx() argument
3067 &dd_idx, sh); in stripe_set_idx()
3071 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3076 BUG_ON(sh->batch_head); in handle_failed_stripe()
3081 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
3093 sh->sector, in handle_failed_stripe()
3099 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3101 bi = sh->dev[i].towrite; in handle_failed_stripe()
3102 sh->dev[i].towrite = NULL; in handle_failed_stripe()
3103 sh->overwrite_disks = 0; in handle_failed_stripe()
3104 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3108 r5l_stripe_write_finished(sh); in handle_failed_stripe()
3110 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3114 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3115 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3125 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3129 bi = sh->dev[i].written; in handle_failed_stripe()
3130 sh->dev[i].written = NULL; in handle_failed_stripe()
3131 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { in handle_failed_stripe()
3132 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_failed_stripe()
3133 sh->dev[i].page = sh->dev[i].orig_page; in handle_failed_stripe()
3138 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3139 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3152 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
3154 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
3155 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
3156 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3157 bi = sh->dev[i].toread; in handle_failed_stripe()
3158 sh->dev[i].toread = NULL; in handle_failed_stripe()
3159 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3160 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3165 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
3167 r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
3176 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
3181 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
3186 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
3192 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3198 BUG_ON(sh->batch_head); in handle_failed_sync()
3199 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
3200 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3220 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3227 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3238 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
3243 rdev = sh->raid_conf->disks[disk_idx].replacement; in want_replace()
3247 && (rdev->recovery_offset <= sh->sector in want_replace()
3248 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3261 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, in need_this_block() argument
3264 struct r5dev *dev = &sh->dev[disk_idx]; in need_this_block()
3265 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in need_this_block()
3266 &sh->dev[s->failed_num[1]] }; in need_this_block()
3283 (s->replacing && want_replace(sh, disk_idx))) in need_this_block()
3308 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in need_this_block()
3336 if (sh->raid_conf->level != 6 && in need_this_block()
3337 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3341 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3342 s->failed_num[i] != sh->qd_idx && in need_this_block()
3351 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
3354 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
3357 if (need_this_block(sh, s, disk_idx, disks)) { in fetch_block()
3363 BUG_ON(sh->batch_head); in fetch_block()
3371 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3372 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3375 sh->ops.target = disk_idx; in fetch_block()
3376 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
3395 &sh->dev[other].flags)) in fetch_block()
3400 (unsigned long long)sh->sector, in fetch_block()
3402 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3404 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
3405 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
3406 sh->ops.target = disk_idx; in fetch_block()
3407 sh->ops.target2 = other; in fetch_block()
3426 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
3436 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
3437 !sh->reconstruct_state) in handle_stripe_fill()
3439 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
3441 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
3452 struct stripe_head *sh, int disks, struct bio_list *return_bi) in handle_stripe_clean_event() argument
3457 struct stripe_head *head_sh = sh; in handle_stripe_clean_event()
3461 if (sh->dev[i].written) { in handle_stripe_clean_event()
3462 dev = &sh->dev[i]; in handle_stripe_clean_event()
3490 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_stripe_clean_event()
3492 !test_bit(STRIPE_DEGRADED, &sh->state), in handle_stripe_clean_event()
3495 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
3498 if (sh != head_sh) { in handle_stripe_clean_event()
3499 dev = &sh->dev[i]; in handle_stripe_clean_event()
3503 sh = head_sh; in handle_stripe_clean_event()
3504 dev = &sh->dev[i]; in handle_stripe_clean_event()
3511 r5l_stripe_write_finished(sh); in handle_stripe_clean_event()
3514 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
3516 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
3517 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
3518 if (sh->qd_idx >= 0) { in handle_stripe_clean_event()
3519 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
3520 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
3523 clear_bit(STRIPE_DISCARD, &sh->state); in handle_stripe_clean_event()
3530 hash = sh->hash_lock_index; in handle_stripe_clean_event()
3532 remove_hash(sh); in handle_stripe_clean_event()
3535 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
3537 if (sh != head_sh) in handle_stripe_clean_event()
3540 sh = head_sh; in handle_stripe_clean_event()
3542 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) in handle_stripe_clean_event()
3543 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_clean_event()
3547 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
3556 struct stripe_head *sh, in handle_stripe_dirtying() argument
3571 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
3579 (unsigned long long)sh->sector); in handle_stripe_dirtying()
3582 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3583 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe_dirtying()
3594 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
3605 (unsigned long long)sh->sector, rmw, rcw); in handle_stripe_dirtying()
3606 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3612 (unsigned long long)sh->sector, rmw); in handle_stripe_dirtying()
3614 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3615 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe_dirtying()
3621 &sh->state)) { in handle_stripe_dirtying()
3628 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3629 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3639 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
3641 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
3648 &sh->state)) { in handle_stripe_dirtying()
3656 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3657 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
3663 (unsigned long long)sh->sector, in handle_stripe_dirtying()
3664 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); in handle_stripe_dirtying()
3668 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe_dirtying()
3669 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
3681 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
3683 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
3684 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
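
A rough illustration of the trade-off the rmw/rcw counters above capture (general RAID-5 behaviour, not taken from the listing): updating one data block in a 5-drive RAID-5 stripe needs two pre-reads for read-modify-write (the old data block and the old parity) but three for reconstruct-write (the other three data blocks), so the smaller count, rmw, wins; overwrite most of the stripe and the balance tips towards rcw instead.
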
3687 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
3692 BUG_ON(sh->batch_head); in handle_parity_checks5()
3693 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
3695 switch (sh->check_state) { in handle_parity_checks5()
3700 sh->check_state = check_state_run; in handle_parity_checks5()
3702 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
3706 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
3709 sh->check_state = check_state_idle; in handle_parity_checks5()
3711 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
3714 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
3725 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks5()
3726 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
3731 sh->check_state = check_state_idle; in handle_parity_checks5()
3743 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
3747 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
3752 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
3754 sh->check_state = check_state_compute_run; in handle_parity_checks5()
3755 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
3758 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
3759 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
3760 sh->ops.target2 = -1; in handle_parity_checks5()
3769 __func__, sh->check_state, in handle_parity_checks5()
3770 (unsigned long long) sh->sector); in handle_parity_checks5()
3775 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
3779 int pd_idx = sh->pd_idx; in handle_parity_checks6()
3780 int qd_idx = sh->qd_idx; in handle_parity_checks6()
3783 BUG_ON(sh->batch_head); in handle_parity_checks6()
3784 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
3794 switch (sh->check_state) { in handle_parity_checks6()
3802 sh->check_state = check_state_run; in handle_parity_checks6()
3808 if (sh->check_state == check_state_run) in handle_parity_checks6()
3809 sh->check_state = check_state_run_pq; in handle_parity_checks6()
3811 sh->check_state = check_state_run_q; in handle_parity_checks6()
3815 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
3817 if (sh->check_state == check_state_run) { in handle_parity_checks6()
3819 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
3822 if (sh->check_state >= check_state_run && in handle_parity_checks6()
3823 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
3835 sh->check_state = check_state_idle; in handle_parity_checks6()
3838 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
3846 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
3852 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
3857 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
3858 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
3863 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
3864 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
3869 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks6()
3871 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
3878 sh->check_state = check_state_idle; in handle_parity_checks6()
3884 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
3887 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
3893 sh->check_state = check_state_compute_result; in handle_parity_checks6()
3904 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
3906 int *target = &sh->ops.target; in handle_parity_checks6()
3908 sh->ops.target = -1; in handle_parity_checks6()
3909 sh->ops.target2 = -1; in handle_parity_checks6()
3910 sh->check_state = check_state_compute_run; in handle_parity_checks6()
3911 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
3913 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
3915 &sh->dev[pd_idx].flags); in handle_parity_checks6()
3917 target = &sh->ops.target2; in handle_parity_checks6()
3920 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
3922 &sh->dev[qd_idx].flags); in handle_parity_checks6()
3933 __func__, sh->check_state, in handle_parity_checks6()
3934 (unsigned long long) sh->sector); in handle_parity_checks6()
3939 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
3947 BUG_ON(sh->batch_head); in handle_stripe_expansion()
3948 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
3949 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
3950 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
3955 sector_t bn = raid5_compute_blocknr(sh, i, 1); in handle_stripe_expansion()
3975 sh->dev[i].page, 0, 0, STRIPE_SIZE, in handle_stripe_expansion()
4010 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
4012 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
4013 int disks = sh->disks; in analyse_stripe()
4020 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; in analyse_stripe()
4021 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; in analyse_stripe()
4034 dev = &sh->dev[i]; in analyse_stripe()
4045 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
4074 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && in analyse_stripe()
4075 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4089 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
4116 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) in analyse_stripe()
4175 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
4185 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
4194 static int clear_batch_ready(struct stripe_head *sh) in clear_batch_ready() argument
4201 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) in clear_batch_ready()
4202 return (sh->batch_head && sh->batch_head != sh); in clear_batch_ready()
4203 spin_lock(&sh->stripe_lock); in clear_batch_ready()
4204 if (!sh->batch_head) { in clear_batch_ready()
4205 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4213 if (sh->batch_head != sh) { in clear_batch_ready()
4214 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4217 spin_lock(&sh->batch_lock); in clear_batch_ready()
4218 list_for_each_entry(tmp, &sh->batch_list, batch_list) in clear_batch_ready()
4220 spin_unlock(&sh->batch_lock); in clear_batch_ready()
4221 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4233 struct stripe_head *sh, *next; in break_stripe_batch_list() local
4237 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { in break_stripe_batch_list()
4239 list_del_init(&sh->batch_list); in break_stripe_batch_list()
4241 WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | in break_stripe_batch_list()
4257 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | in break_stripe_batch_list()
4262 sh->check_state = head_sh->check_state; in break_stripe_batch_list()
4263 sh->reconstruct_state = head_sh->reconstruct_state; in break_stripe_batch_list()
4264 for (i = 0; i < sh->disks; i++) { in break_stripe_batch_list()
4265 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in break_stripe_batch_list()
4267 sh->dev[i].flags = head_sh->dev[i].flags & in break_stripe_batch_list()
4270 spin_lock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4271 sh->batch_head = NULL; in break_stripe_batch_list()
4272 spin_unlock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4274 sh->state & handle_flags) in break_stripe_batch_list()
4275 set_bit(STRIPE_HANDLE, &sh->state); in break_stripe_batch_list()
4276 raid5_release_stripe(sh); in break_stripe_batch_list()
4291 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
4294 struct r5conf *conf = sh->raid_conf; in handle_stripe()
4297 int disks = sh->disks; in handle_stripe()
4300 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4301 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
4304 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4308 if (clear_batch_ready(sh) ) { in handle_stripe()
4309 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
4313 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) in handle_stripe()
4314 break_stripe_batch_list(sh, 0); in handle_stripe()
4316 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { in handle_stripe()
4317 spin_lock(&sh->stripe_lock); in handle_stripe()
4319 if (!test_bit(STRIPE_DISCARD, &sh->state) && in handle_stripe()
4320 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
4321 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4322 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4323 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4325 spin_unlock(&sh->stripe_lock); in handle_stripe()
4327 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4331 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4332 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4333 sh->check_state, sh->reconstruct_state); in handle_stripe()
4335 analyse_stripe(sh, &s); in handle_stripe()
4337 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) in handle_stripe()
4341 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4348 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4356 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
4358 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
4369 sh->check_state = 0; in handle_stripe()
4370 sh->reconstruct_state = 0; in handle_stripe()
4371 break_stripe_batch_list(sh, 0); in handle_stripe()
4373 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); in handle_stripe()
4375 handle_failed_sync(conf, sh, &s); in handle_stripe()
4382 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
4384 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
4385 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
4386 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4391 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
4392 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
4393 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
4394 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && in handle_stripe()
4395 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
4397 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
4399 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
4408 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
4410 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4413 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
4421 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
4422 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
4423 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
4424 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
4425 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
4426 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
4438 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); in handle_stripe()
4449 handle_stripe_fill(sh, &s, disks); in handle_stripe()
4457 if (s.to_write && !sh->reconstruct_state && !sh->check_state) in handle_stripe()
4458 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
4465 if (sh->check_state || in handle_stripe()
4467 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
4468 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
4470 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
4472 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
4476 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
4477 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
4480 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
4481 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
4482 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
4483 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
4487 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4488 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4491 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
4492 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
4494 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4495 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
4504 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
4524 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
4526 = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
4531 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4532 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4542 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
4543 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
4545 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
4546 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
4551 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
4552 !sh->reconstruct_state) { in handle_stripe()
4554 sh->disks = conf->raid_disks; in handle_stripe()
4555 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
4556 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
4557 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
4558 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
4565 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
4566 handle_stripe_expansion(conf, sh); in handle_stripe()
4586 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
4590 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
4597 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
4606 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
4613 raid_run_ops(sh, s.ops_request); in handle_stripe()
4615 ops_run_io(sh, &s); in handle_stripe()
4638 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
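handle_stripe() accounts for most of the hits above; stripped of the per-state work, its framing is short: only one context may handle a stripe at a time (STRIPE_ACTIVE taken with test_and_set_bit_lock), batch members defer to their head via clear_batch_ready(), and a pending batch error dissolves the batch before anything else runs. A sketch of just that framing, with the analysis and the read/write/sync/expand handling elided.

        /* Sketch of the entry/exit framing of handle_stripe(); body elided. */
        static void handle_stripe(struct stripe_head *sh)
        {
                clear_bit(STRIPE_HANDLE, &sh->state);
                if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
                        /* someone else is handling it: ask for another pass */
                        set_bit(STRIPE_HANDLE, &sh->state);
                        return;
                }

                if (clear_batch_ready(sh)) {            /* batch member: head does the work */
                        clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
                        return;
                }

                if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
                        break_stripe_batch_list(sh, 0);

                /* ... analyse_stripe(), fill/dirty/sync/expand handling, ops_run_io() ... */

                clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
        }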
4646 struct stripe_head *sh; in raid5_activate_delayed() local
4647 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
4649 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
4650 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
4652 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
4653 raid5_wakeup_stripe_thread(sh); in raid5_activate_delayed()
4666 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
4668 list_del_init(&sh->lru); in activate_bit_delay()
4669 atomic_inc(&sh->count); in activate_bit_delay()
4670 hash = sh->hash_lock_index; in activate_bit_delay()
4671 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
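The raid5_activate_delayed() and activate_bit_delay() lines are two halves of the same promotion: once preread pressure drops, stripes parked on conf->delayed_list move to hold_list (bumping preread_active_stripes), while bitmap-delayed stripes are simply re-released so do_release_stripe() can requeue them. A sketch of the delayed-list half; IO_THRESHOLD and preread_active_stripes are raid5.c's pressure gate, used here on the assumption that they behave as their names suggest.

        /* Sketch based on the raid5_activate_delayed() lines above. */
        static void raid5_activate_delayed(struct r5conf *conf)
        {
                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
                        while (!list_empty(&conf->delayed_list)) {
                                struct list_head *l = conf->delayed_list.next;
                                struct stripe_head *sh;

                                sh = list_entry(l, struct stripe_head, lru);
                                list_del_init(l);
                                clear_bit(STRIPE_DELAYED, &sh->state);
                                if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                        atomic_inc(&conf->preread_active_stripes);
                                list_add_tail(&sh->lru, &conf->hold_list);
                                raid5_wakeup_stripe_thread(sh);
                        }
                }
        }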
4903 struct stripe_head *sh = NULL, *tmp; in __get_priority_stripe() local
4929 sh = list_entry(handle_list->next, typeof(*sh), lru); in __get_priority_stripe()
4933 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
4953 sh = tmp; in __get_priority_stripe()
4958 if (sh) { in __get_priority_stripe()
4966 if (!sh) in __get_priority_stripe()
4971 sh->group = NULL; in __get_priority_stripe()
4973 list_del_init(&sh->lru); in __get_priority_stripe()
4974 BUG_ON(atomic_inc_return(&sh->count) != 1); in __get_priority_stripe()
4975 return sh; in __get_priority_stripe()
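Whatever list __get_priority_stripe() chose from, the lines above show the same hand-off at the end: a found stripe is dropped from its worker group, detached from its lru list, and its refcount is asserted to go from zero to one, since only idle stripes live on those scheduling lists. A sketch of that tail:

        /* Sketch of the tail of __get_priority_stripe(): detach the chosen stripe. */
        if (!sh)
                return NULL;                    /* nothing queued for this group */

        sh->group = NULL;                       /* no longer owned by a worker group */
        list_del_init(&sh->lru);
        /* the stripe was idle on a list, so this must be the first reference */
        BUG_ON(atomic_inc_return(&sh->count) != 1);
        return sh;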
4988 struct stripe_head *sh; in raid5_unplug() local
4997 sh = list_first_entry(&cb->list, struct stripe_head, lru); in raid5_unplug()
4998 list_del_init(&sh->lru); in raid5_unplug()
5005 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); in raid5_unplug()
5010 hash = sh->hash_lock_index; in raid5_unplug()
5011 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5024 struct stripe_head *sh) in release_stripe_plug() argument
5032 raid5_release_stripe(sh); in release_stripe_plug()
5045 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) in release_stripe_plug()
5046 list_add_tail(&sh->lru, &cb->list); in release_stripe_plug()
5048 raid5_release_stripe(sh); in release_stripe_plug()
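raid5_unplug() and release_stripe_plug() together implement plugging for stripe release: while a block plug is active, released stripes are parked on a per-plug list (guarded by STRIPE_ON_UNPLUG_LIST) and only the unplug callback feeds them to __release_stripe() per hash. A sketch of the release side, assuming raid5_plug_cb wraps a struct blk_plug_cb plus the stripe list seen above; the first-use initialisation of that list and of temp_inactive_list is omitted.

        /* Sketch of the release_stripe_plug() pattern shown above. */
        static void release_stripe_plug(struct mddev *mddev, struct stripe_head *sh)
        {
                struct blk_plug_cb *blk_cb = blk_check_plugged(raid5_unplug, mddev,
                                                sizeof(struct raid5_plug_cb));
                struct raid5_plug_cb *cb;

                if (!blk_cb) {                          /* no plug in progress */
                        raid5_release_stripe(sh);
                        return;
                }
                cb = container_of(blk_cb, struct raid5_plug_cb, cb);

                if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
                        list_add_tail(&sh->lru, &cb->list);     /* defer to unplug time */
                else
                        raid5_release_stripe(sh);
        }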
5055 struct stripe_head *sh; in make_discard_request() local
5083 sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); in make_discard_request()
5086 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5087 if (test_bit(STRIPE_SYNCING, &sh->state)) { in make_discard_request()
5088 raid5_release_stripe(sh); in make_discard_request()
5092 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5093 spin_lock_irq(&sh->stripe_lock); in make_discard_request()
5095 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5097 if (sh->dev[d].towrite || sh->dev[d].toread) { in make_discard_request()
5098 set_bit(R5_Overlap, &sh->dev[d].flags); in make_discard_request()
5099 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5100 raid5_release_stripe(sh); in make_discard_request()
5105 set_bit(STRIPE_DISCARD, &sh->state); in make_discard_request()
5107 sh->overwrite_disks = 0; in make_discard_request()
5109 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5111 sh->dev[d].towrite = bi; in make_discard_request()
5112 set_bit(R5_OVERWRITE, &sh->dev[d].flags); in make_discard_request()
5114 sh->overwrite_disks++; in make_discard_request()
5116 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5122 sh->sector, in make_discard_request()
5125 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5126 set_bit(STRIPE_BIT_DELAY, &sh->state); in make_discard_request()
5129 set_bit(STRIPE_HANDLE, &sh->state); in make_discard_request()
5130 clear_bit(STRIPE_DELAYED, &sh->state); in make_discard_request()
5131 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_discard_request()
5133 release_stripe_plug(mddev, sh); in make_discard_request()
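The make_discard_request() lines show a full-stripe discard being staged: after the stripe is obtained and any sync or pending-I/O overlap forces a retry, every data block (pd_idx and qd_idx are skipped) takes the discard bio as towrite with R5_OVERWRITE, overwrite_disks is counted, and the stripe is queued for handling. A sketch of the marking step under stripe_lock; bio reference counting and the bitmap/bm_seq handling are omitted.

        /* Sketch of the per-device marking step in make_discard_request(). */
        spin_lock_irq(&sh->stripe_lock);
        set_bit(STRIPE_DISCARD, &sh->state);
        sh->overwrite_disks = 0;
        for (d = 0; d < conf->raid_disks; d++) {
                if (d == sh->pd_idx || d == sh->qd_idx)
                        continue;                       /* parity is regenerated, not discarded here */
                sh->dev[d].towrite = bi;                /* the one discard bio covers every data block */
                set_bit(R5_OVERWRITE, &sh->dev[d].flags);
                sh->overwrite_disks++;
        }
        spin_unlock_irq(&sh->stripe_lock);

        set_bit(STRIPE_HANDLE, &sh->state);
        clear_bit(STRIPE_DELAYED, &sh->state);
        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                atomic_inc(&conf->preread_active_stripes);
        release_stripe_plug(mddev, sh);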
5149 struct stripe_head *sh; in make_request() local
5237 sh = raid5_get_active_stripe(conf, new_sector, previous, in make_request()
5239 if (sh) { in make_request()
5258 raid5_release_stripe(sh); in make_request()
5268 raid5_release_stripe(sh); in make_request()
5275 raid5_release_stripe(sh); in make_request()
5291 if (test_bit(STRIPE_EXPANDING, &sh->state) || in make_request()
5292 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { in make_request()
5298 raid5_release_stripe(sh); in make_request()
5303 set_bit(STRIPE_HANDLE, &sh->state); in make_request()
5304 clear_bit(STRIPE_DELAYED, &sh->state); in make_request()
5305 if ((!sh->batch_head || sh == sh->batch_head) && in make_request()
5307 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_request()
5309 release_stripe_plug(mddev, sh); in make_request()
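The make_request() lines trace the per-chunk path of a normal read or write: compute the layout position (previous selects the old geometry during reshape), grab the stripe, attach the bio with add_stripe_bio(), retry if the stripe is expanding or the bio overlaps, then mark the stripe for handling and hand it to release_stripe_plug(). A sketch of that loop body; logical_sector, last_sector, rw and previous are taken from the surrounding function, the retry path skips the wait_for_overlap bookkeeping, and the REQ_SYNC refinement on the preread test is dropped.

        /* Sketch of the main loop of make_request(), per STRIPE_SECTORS chunk. */
        for (; logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
                int dd_idx;
                sector_t new_sector;
        retry:
                new_sector = raid5_compute_sector(conf, logical_sector,
                                                  previous, &dd_idx, NULL);
                sh = raid5_get_active_stripe(conf, new_sector, previous,
                                             0 /* may block */, 0);
                if (!sh)
                        break;                          /* e.g. interrupted while waiting */

                if (test_bit(STRIPE_EXPANDING, &sh->state) ||
                    !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
                        /* overlap or expansion in progress: back off and redo this
                         * sector (the real code also waits on wait_for_overlap) */
                        raid5_release_stripe(sh);
                        schedule();
                        goto retry;
                }

                set_bit(STRIPE_HANDLE, &sh->state);
                clear_bit(STRIPE_DELAYED, &sh->state);
                if ((!sh->batch_head || sh == sh->batch_head) &&
                    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                        atomic_inc(&conf->preread_active_stripes);
                release_stripe_plug(mddev, sh);
        }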
5344 struct stripe_head *sh; in reshape_request() local
5485 sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
5486 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
5491 for (j=sh->disks; j--;) { in reshape_request()
5493 if (j == sh->pd_idx) in reshape_request()
5496 j == sh->qd_idx) in reshape_request()
5498 s = raid5_compute_blocknr(sh, j, 0); in reshape_request()
5503 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); in reshape_request()
5504 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
5505 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
5508 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
5509 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
5511 list_add(&sh->lru, &stripes); in reshape_request()
5534 sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
5535 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
5536 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
5537 raid5_release_stripe(sh); in reshape_request()
5544 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
5545 list_del_init(&sh->lru); in reshape_request()
5546 raid5_release_stripe(sh); in reshape_request()
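The reshape_request() lines show the destination side of a reshape: each new stripe is grabbed and marked STRIPE_EXPANDING, and every block that is not parity and lies beyond the old array size is zero-filled and marked R5_Expanded | R5_UPTODATE, after which the stripe is parked on a local list as EXPAND_READY. A sketch of that per-stripe setup; the real code marks EXPAND_READY only when no block had to be skipped, and that bookkeeping is dropped here.

        /* Sketch of the destination-stripe setup in reshape_request(). */
        sh = raid5_get_active_stripe(conf, stripe_addr + i, 0, 0, 1);
        set_bit(STRIPE_EXPANDING, &sh->state);

        for (j = sh->disks; j--; ) {
                sector_t s;

                if (j == sh->pd_idx || (conf->level == 6 && j == sh->qd_idx))
                        continue;                       /* parity is computed, not copied */
                s = raid5_compute_blocknr(sh, j, 0);
                if (s < raid5_size(mddev, 0, 0))
                        continue;                       /* still backed by old data */
                /* beyond the old array: present the block as zeroes */
                memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
                set_bit(R5_Expanded, &sh->dev[j].flags);
                set_bit(R5_UPTODATE, &sh->dev[j].flags);
        }
        set_bit(STRIPE_EXPAND_READY, &sh->state);
        set_bit(STRIPE_HANDLE, &sh->state);
        list_add(&sh->lru, &stripes);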
5586 struct stripe_head *sh; in sync_request() local
5644 sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); in sync_request()
5645 if (sh == NULL) { in sync_request()
5646 sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); in sync_request()
5667 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in sync_request()
5668 set_bit(STRIPE_HANDLE, &sh->state); in sync_request()
5670 raid5_release_stripe(sh); in sync_request()
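The sync_request() lines show how resync obtains its stripe: a non-blocking attempt first, then a blocking one with a brief uninterruptible sleep so the caller does not hammer a stripe cache someone else is draining, after which the stripe is flagged STRIPE_SYNC_REQUESTED and STRIPE_HANDLE and released. A sketch of that acquisition, with the degraded-check and bitmap handling in between left out.

        /* Sketch of the stripe acquisition in sync_request(). */
        sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0);  /* try without blocking */
        if (sh == NULL) {
                sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0);
                /* we had to wait: back off briefly so we do not swamp the
                 * stripe cache while others are trying to get access */
                schedule_timeout_uninterruptible(1);
        }

        set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
        set_bit(STRIPE_HANDLE, &sh->state);
        raid5_release_stripe(sh);
        return STRIPE_SECTORS;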
5687 struct stripe_head *sh; in retry_aligned_read() local
5709 sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); in retry_aligned_read()
5711 if (!sh) { in retry_aligned_read()
5718 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
5719 raid5_release_stripe(sh); in retry_aligned_read()
5725 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()
5726 handle_stripe(sh); in retry_aligned_read()
5727 raid5_release_stripe(sh); in retry_aligned_read()
5745 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
5750 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
5751 batch[batch_size++] = sh; in handle_active_stripes()
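The final hits, from handle_active_stripes(), show how raid5d and the worker threads drain work: called with conf->device_lock held, the function pulls up to MAX_STRIPE_BATCH stripes off with __get_priority_stripe(), handles them outside the lock, and re-releases them in one pass. A sketch of that batching loop; the journal and inactive-list flushing done between those steps is omitted. Batching keeps the number of device_lock round trips per handled stripe low, which is the point of the array.

        /* Sketch of the batching loop in handle_active_stripes(). */
        static int handle_active_stripes(struct r5conf *conf, int group,
                                         struct r5worker *worker,
                                         struct list_head *temp_inactive_list)
        {
                struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
                int i, batch_size = 0;

                while (batch_size < MAX_STRIPE_BATCH &&
                       (sh = __get_priority_stripe(conf, group)) != NULL)
                        batch[batch_size++] = sh;

                if (batch_size == 0)
                        return 0;

                spin_unlock_irq(&conf->device_lock);    /* handle stripes without the lock */
                for (i = 0; i < batch_size; i++)
                        handle_stripe(batch[i]);
                spin_lock_irq(&conf->device_lock);

                for (i = 0; i < batch_size; i++) {
                        int hash = batch[i]->hash_lock_index;
                        __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
                }
                return batch_size;
        }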