head_sh    7 drivers/md/raid5-log.h extern int r5l_write_stripe(struct r5l_log *log, struct stripe_head *head_sh);
head_sh  983 drivers/md/raid5.c     struct stripe_head *head_sh = sh;
head_sh 1000 drivers/md/raid5.c     sh = head_sh;
head_sh 1037 drivers/md/raid5.c     if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
head_sh 1107 drivers/md/raid5.c     if (sh != head_sh)
head_sh 1108 drivers/md/raid5.c     atomic_inc(&head_sh->count);
head_sh 1115 drivers/md/raid5.c     if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
head_sh 1174 drivers/md/raid5.c     if (sh != head_sh)
head_sh 1175 drivers/md/raid5.c     atomic_inc(&head_sh->count);
head_sh 1215 drivers/md/raid5.c     if (!head_sh->batch_head)
head_sh 1219 drivers/md/raid5.c     if (sh != head_sh)
head_sh 1224 drivers/md/raid5.c     defer_issue_bios(conf, head_sh->sector, &pending_bios);
head_sh 1725 drivers/md/raid5.c     struct stripe_head *head_sh = sh;
head_sh 1734 drivers/md/raid5.c     sh = head_sh;
head_sh 1735 drivers/md/raid5.c     if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
head_sh 1776 drivers/md/raid5.c     if (head_sh->batch_head) {
head_sh 1780 drivers/md/raid5.c     if (sh == head_sh)
head_sh 1849 drivers/md/raid5.c     struct stripe_head *head_sh = sh;
head_sh 1873 drivers/md/raid5.c     if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
head_sh 1878 drivers/md/raid5.c     if (head_sh->dev[i].written ||
head_sh 1879 drivers/md/raid5.c     test_bit(R5_InJournal, &head_sh->dev[i].flags))
head_sh 1896 drivers/md/raid5.c     last_stripe = !head_sh->batch_head ||
head_sh 1898 drivers/md/raid5.c     struct stripe_head, batch_list) == head_sh;
head_sh 1903 drivers/md/raid5.c     atomic_inc(&head_sh->count);
head_sh 1904 drivers/md/raid5.c     init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
head_sh 1931 drivers/md/raid5.c     struct stripe_head *head_sh = sh;
head_sh 1964 drivers/md/raid5.c     last_stripe = !head_sh->batch_head ||
head_sh 1966 drivers/md/raid5.c     struct stripe_head, batch_list) == head_sh;
head_sh 1969 drivers/md/raid5.c     atomic_inc(&head_sh->count);
head_sh 1971 drivers/md/raid5.c     head_sh, to_addr_conv(sh, percpu, j));
head_sh 3743 drivers/md/raid5.c     static void break_stripe_batch_list(struct stripe_head *head_sh,
head_sh 3756 drivers/md/raid5.c     struct stripe_head *head_sh = sh;
head_sh 3791 drivers/md/raid5.c     if (head_sh->batch_head) {
head_sh 3795 drivers/md/raid5.c     if (sh != head_sh) {
head_sh 3800 drivers/md/raid5.c     sh = head_sh;
head_sh 3829 drivers/md/raid5.c     if (head_sh->batch_head) {
head_sh 3832 drivers/md/raid5.c     if (sh != head_sh)
head_sh 3835 drivers/md/raid5.c     sh = head_sh;
head_sh 3846 drivers/md/raid5.c     if (head_sh->batch_head && do_endio)
head_sh 3847 drivers/md/raid5.c     break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
head_sh 4605 drivers/md/raid5.c     static void break_stripe_batch_list(struct stripe_head *head_sh,
head_sh 4612 drivers/md/raid5.c     list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
head_sh 4629 drivers/md/raid5.c     WARN_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
head_sh 4631 drivers/md/raid5.c     "head stripe state: %lx\n", head_sh->state);
head_sh 4637 drivers/md/raid5.c     head_sh->state & (1 << STRIPE_INSYNC));
head_sh 4639 drivers/md/raid5.c     sh->check_state = head_sh->check_state;
head_sh 4640 drivers/md/raid5.c     sh->reconstruct_state = head_sh->reconstruct_state;
head_sh 4647 drivers/md/raid5.c     sh->dev[i].flags = head_sh->dev[i].flags &
head_sh 4655 drivers/md/raid5.c     spin_lock_irq(&head_sh->stripe_lock);
head_sh 4656 drivers/md/raid5.c     head_sh->batch_head = NULL;
head_sh 4657 drivers/md/raid5.c     spin_unlock_irq(&head_sh->stripe_lock);
head_sh 4658 drivers/md/raid5.c     for (i = 0; i < head_sh->disks; i++)
head_sh 4659 drivers/md/raid5.c     if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
head_sh 4661 drivers/md/raid5.c     if (head_sh->state & handle_flags)
head_sh 4662 drivers/md/raid5.c     set_bit(STRIPE_HANDLE, &head_sh->state);
head_sh 4665 drivers/md/raid5.c     wake_up(&head_sh->raid_conf->wait_for_overlap);
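The densest cluster of references, drivers/md/raid5.c lines 4605-4665, is break_stripe_batch_list(): it walks head_sh->batch_list, copies the check/reconstruct state and a masked set of per-device flags from the head stripe to each batch member, and clears each member's batch_head under the stripe lock before detaching the head itself. Below is a minimal userspace sketch of that detach pattern only; struct stripe, break_batch() and the flag macros are hypothetical simplifications of the kernel's struct stripe_head, its batch_list and its flag bits, and locking, wakeups and reference counting are omitted.

/*
 * Sketch of the "break the batch" pattern seen around raid5.c:4605-4665.
 * All names below are simplified stand-ins, not the kernel definitions.
 */
#include <stdio.h>

#define R5_OVERLAP      (1UL << 0)   /* stand-ins for per-device flag bits */
#define R5_UPTODATE     (1UL << 1)
#define BATCH_FLAG_MASK (R5_UPTODATE) /* flags a member inherits from the head */

struct stripe {                  /* hypothetical, simplified stripe_head */
	unsigned long state;
	unsigned long dev_flags;     /* one device instead of an array */
	int check_state;
	int reconstruct_state;
	struct stripe *batch_head;   /* head of the batch this stripe joined */
	struct stripe *batch_next;   /* simplified stand-in for batch_list */
};

/*
 * Detach every member from the head's batch: copy the state a member needs
 * from the head, then clear its batch_head so it is handled on its own again.
 */
static void break_batch(struct stripe *head)
{
	struct stripe *sh = head->batch_next;

	while (sh) {
		struct stripe *next = sh->batch_next;

		sh->check_state = head->check_state;
		sh->reconstruct_state = head->reconstruct_state;
		sh->dev_flags = head->dev_flags & BATCH_FLAG_MASK;
		sh->batch_head = NULL;   /* member now stands alone */
		sh->batch_next = NULL;
		sh = next;
	}
	head->batch_next = NULL;     /* empty the batch list */
	head->batch_head = NULL;     /* the head no longer leads a batch */
}

int main(void)
{
	struct stripe head = { .dev_flags = R5_UPTODATE | R5_OVERLAP,
			       .check_state = 2, .reconstruct_state = 3 };
	struct stripe a = { .batch_head = &head }, b = { .batch_head = &head };

	head.batch_next = &a;        /* head -> a -> b */
	a.batch_next = &b;

	break_batch(&head);
	printf("a: batch_head=%p flags=%lx check=%d\n",
	       (void *)a.batch_head, a.dev_flags, a.check_state);
	printf("b: batch_head=%p flags=%lx check=%d\n",
	       (void *)b.batch_head, b.dev_flags, b.check_state);
	return 0;
}

After break_batch() runs, each former member carries only the masked flags copied from the head and no longer points at it, which is the same end state the raid5.c references at lines 4639-4657 establish for real batch members before they are handled individually.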