dd_idx           1820 drivers/md/raid5-cache.c 	int dd_idx;
dd_idx           1824 drivers/md/raid5-cache.c 			     &dd_idx, sh);
dd_idx           1825 drivers/md/raid5-cache.c 	r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
dd_idx           1826 drivers/md/raid5-cache.c 	sh->dev[dd_idx].log_checksum =
dd_idx           1830 drivers/md/raid5-cache.c 	set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
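
The raid5-cache.c hits above are the write-back journal recovery path: the logged sector is mapped to a stripe slot with raid5_compute_sector(), the journaled page is read into sh->dev[dd_idx].page via r5l_recovery_read_page(), its checksum is stashed in the slot, and R5_Wantwrite marks the slot for write-out to the member disk. A minimal user-space sketch of that per-slot bookkeeping, with hypothetical struct and flag names standing in for the kernel ones:

#include <stdint.h>
#include <string.h>

#define SLOT_PAGE_SIZE	4096
#define SLOT_WANTWRITE	(1u << 0)		/* stands in for R5_Wantwrite */

/* stands in for one sh->dev[] entry */
struct stripe_slot {
	uint8_t page[SLOT_PAGE_SIZE];
	uint32_t log_checksum;
	unsigned int flags;
};

/* replay one journaled data block into the slot selected by dd_idx */
static void replay_logged_block(struct stripe_slot *dev, int dd_idx,
				const uint8_t *journal_page, uint32_t checksum)
{
	memcpy(dev[dd_idx].page, journal_page, SLOT_PAGE_SIZE);
	dev[dd_idx].log_checksum = checksum;
	dev[dd_idx].flags |= SLOT_WANTWRITE;	/* flush to the member disk later */
}
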
dd_idx            867 drivers/md/raid5-ppl.c 			int dd_idx;
dd_idx            889 drivers/md/raid5-ppl.c 						      &dd_idx, NULL);
dd_idx            892 drivers/md/raid5-ppl.c 				 (unsigned long long)r_sector, dd_idx,
dd_idx            895 drivers/md/raid5-ppl.c 			rdev = conf->disks[dd_idx].rdev;
dd_idx            899 drivers/md/raid5-ppl.c 					 __func__, indent, "", dd_idx);
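
The raid5-ppl.c hits sit in partial parity log (PPL) recovery: each data sector described by a PPL entry is translated to dd_idx, and conf->disks[dd_idx].rdev then selects the member device whose data has to be rewritten (the pr_debug lines print both). A hedged sketch of that lookup, with a made-up member table standing in for conf->disks[]:

#include <stddef.h>

/* stands in for conf->disks[]; 'present' models a non-NULL rdev */
struct member_disk {
	int present;
	int devnum;
};

/*
 * Map a recovered PPL data sector's slot index to the member device that
 * must be rewritten; NULL means the device is missing and the sector is
 * skipped, as in the kernel's recovery loop.
 */
static struct member_disk *ppl_target(struct member_disk *disks,
				      int raid_disks, int dd_idx)
{
	if (dd_idx < 0 || dd_idx >= raid_disks || !disks[dd_idx].present)
		return NULL;
	return &disks[dd_idx];
}
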
dd_idx            744 drivers/md/raid5.c 	int dd_idx;
dd_idx            792 drivers/md/raid5.c 	dd_idx = 0;
dd_idx            793 drivers/md/raid5.c 	while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
dd_idx            794 drivers/md/raid5.c 		dd_idx++;
dd_idx            795 drivers/md/raid5.c 	if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
dd_idx            796 drivers/md/raid5.c 	    bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
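
The raid5.c:744-796 hits are the stripe-batching check (stripe_add_to_batch_list() in mainline): before chaining a stripe onto a batch head, the code picks the first slot that holds data rather than parity and compares the op and flags of the write bios queued on that slot in both stripes. The skip loop is small enough to lift out as a sketch:

/*
 * Sketch of the check above: find the first device slot that holds data,
 * i.e. skip the P slot and, on RAID6, the Q slot (RAID5 stripes leave
 * qd_idx at -1, so only pd_idx is skipped).
 */
static int first_data_slot(int pd_idx, int qd_idx)
{
	int dd_idx = 0;

	while (dd_idx == pd_idx || dd_idx == qd_idx)
		dd_idx++;
	return dd_idx;
}
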
dd_idx           2714 drivers/md/raid5.c 			      int previous, int *dd_idx,
dd_idx           2743 drivers/md/raid5.c 	*dd_idx = sector_div(stripe, data_disks);
dd_idx           2757 drivers/md/raid5.c 			if (*dd_idx >= pd_idx)
dd_idx           2758 drivers/md/raid5.c 				(*dd_idx)++;
dd_idx           2762 drivers/md/raid5.c 			if (*dd_idx >= pd_idx)
dd_idx           2763 drivers/md/raid5.c 				(*dd_idx)++;
dd_idx           2767 drivers/md/raid5.c 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
dd_idx           2771 drivers/md/raid5.c 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
dd_idx           2775 drivers/md/raid5.c 			(*dd_idx)++;
dd_idx           2791 drivers/md/raid5.c 				(*dd_idx)++;	/* Q D D D P */
dd_idx           2793 drivers/md/raid5.c 			} else if (*dd_idx >= pd_idx)
dd_idx           2794 drivers/md/raid5.c 				(*dd_idx) += 2; /* D D P Q D */
dd_idx           2800 drivers/md/raid5.c 				(*dd_idx)++;	/* Q D D D P */
dd_idx           2802 drivers/md/raid5.c 			} else if (*dd_idx >= pd_idx)
dd_idx           2803 drivers/md/raid5.c 				(*dd_idx) += 2; /* D D P Q D */
dd_idx           2808 drivers/md/raid5.c 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
dd_idx           2813 drivers/md/raid5.c 			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
dd_idx           2819 drivers/md/raid5.c 			(*dd_idx) += 2;
dd_idx           2833 drivers/md/raid5.c 				(*dd_idx)++;	/* Q D D D P */
dd_idx           2835 drivers/md/raid5.c 			} else if (*dd_idx >= pd_idx)
dd_idx           2836 drivers/md/raid5.c 				(*dd_idx) += 2; /* D D P Q D */
dd_idx           2849 drivers/md/raid5.c 				(*dd_idx)++;	/* Q D D D P */
dd_idx           2851 drivers/md/raid5.c 			} else if (*dd_idx >= pd_idx)
dd_idx           2852 drivers/md/raid5.c 				(*dd_idx) += 2; /* D D P Q D */
dd_idx           2860 drivers/md/raid5.c 			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
dd_idx           2867 drivers/md/raid5.c 			if (*dd_idx >= pd_idx)
dd_idx           2868 drivers/md/raid5.c 				(*dd_idx)++;
dd_idx           2874 drivers/md/raid5.c 			if (*dd_idx >= pd_idx)
dd_idx           2875 drivers/md/raid5.c 				(*dd_idx)++;
dd_idx           2881 drivers/md/raid5.c 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
dd_idx           2887 drivers/md/raid5.c 			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
dd_idx           2893 drivers/md/raid5.c 			(*dd_idx)++;
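
The raid5.c:2714-2893 block is raid5_compute_sector(): the sector_div() at 2743 turns the chunk number into a raw data index (chunk_number % data_disks), and the layout switch then shifts that index past the parity slot(s), which is what the Q D D D P / D D P Q D comments annotate. A standalone sketch of the split plus the two left-asymmetric adjustments, assuming pd_idx is already known for the stripe:

/* split a logical sector into (stripe, raw data index), mirroring the
 * sector_div() calls around raid5.c:2743 */
static void split_sector(unsigned long long r_sector,
			 unsigned int sectors_per_chunk, int data_disks,
			 unsigned long long *stripe, int *raw_idx,
			 unsigned int *chunk_offset)
{
	unsigned long long chunk_number;

	*chunk_offset = (unsigned int)(r_sector % sectors_per_chunk);
	chunk_number = r_sector / sectors_per_chunk;
	*raw_idx = (int)(chunk_number % data_disks);
	*stripe = chunk_number / data_disks;
}

/* RAID5: one parity slot; data at or after pd_idx shifts up by one */
static int raid5_adjust(int raw_idx, int pd_idx)
{
	if (raw_idx >= pd_idx)
		raw_idx++;
	return raw_idx;
}

/*
 * RAID6 left-asymmetric: Q normally follows P.  When P lands on the last
 * device the row wraps to Q D D D P and data simply starts one slot later;
 * otherwise (D D P Q D) data at or after pd_idx skips both P and Q.
 */
static int raid6_adjust(int raw_idx, int pd_idx, int raid_disks)
{
	if (pd_idx == raid_disks - 1)
		raw_idx++;		/* Q D D D P */
	else if (raw_idx >= pd_idx)
		raw_idx += 2;		/* D D P Q D */
	return raw_idx;
}

The symmetric variants rotate the data start together with the parity instead, which is the (pd_idx + 1 + *dd_idx) % raid_disks and (pd_idx + 2 + *dd_idx) % raid_disks forms at 2767/2771 and 2808/2813.
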
dd_idx           2928 drivers/md/raid5.c 	int dummy1, dd_idx = i;
dd_idx           3026 drivers/md/raid5.c 	if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
dd_idx           3205 drivers/md/raid5.c static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
dd_idx           3217 drivers/md/raid5.c 	sh->dev[dd_idx].write_hint = bi->bi_write_hint;
dd_idx           3222 drivers/md/raid5.c 		bip = &sh->dev[dd_idx].towrite;
dd_idx           3226 drivers/md/raid5.c 		bip = &sh->dev[dd_idx].toread;
dd_idx           3251 drivers/md/raid5.c 			    (i == dd_idx || sh->dev[i].towrite)) {
dd_idx           3277 drivers/md/raid5.c 		sector_t sector = sh->dev[dd_idx].sector;
dd_idx           3278 drivers/md/raid5.c 		for (bi=sh->dev[dd_idx].towrite;
dd_idx           3279 drivers/md/raid5.c 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
dd_idx           3281 drivers/md/raid5.c 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
dd_idx           3285 drivers/md/raid5.c 		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
dd_idx           3286 drivers/md/raid5.c 			if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
dd_idx           3292 drivers/md/raid5.c 		(unsigned long long)sh->sector, dd_idx);
dd_idx           3325 drivers/md/raid5.c 	set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
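
add_stripe_bio() (raid5.c:3205-3325) links a bio into sh->dev[dd_idx].towrite or .toread, then walks the write list to decide whether the queued bios overwrite the whole chunk, setting R5_OVERWRITE when they do; an overlapping request instead sets R5_Overlap so the caller backs off and retries. A simplified model of the coverage walk, using a fake bio type in place of struct bio:

#include <stdbool.h>

/* simplified stand-in for a queued bio: start sector and length */
struct queued_bio {
	unsigned long long sector;
	unsigned int sectors;
	struct queued_bio *next;	/* next bio queued on this slot */
};

/*
 * Walk the sorted write list of one stripe slot and report whether the bios
 * collectively cover every sector of the chunk, mirroring the R5_OVERWRITE
 * test above.
 */
static bool writes_cover_chunk(struct queued_bio *towrite,
			       unsigned long long dev_sector,
			       unsigned int stripe_sectors)
{
	unsigned long long covered = dev_sector;
	struct queued_bio *bi;

	for (bi = towrite;
	     bi && covered < dev_sector + stripe_sectors &&
	     bi->sector <= covered;
	     bi = bi->next)
		if (bi->sector + bi->sectors >= covered)
			covered = bi->sector + bi->sectors;

	return covered >= dev_sector + stripe_sectors;
}
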
dd_idx           3337 drivers/md/raid5.c 	int dd_idx;
dd_idx           3345 drivers/md/raid5.c 			     &dd_idx, sh);
dd_idx           4315 drivers/md/raid5.c 			int dd_idx, j;
dd_idx           4321 drivers/md/raid5.c 							  &dd_idx, NULL);
dd_idx           4330 drivers/md/raid5.c 			   test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
dd_idx           4338 drivers/md/raid5.c 			tx = async_memcpy(sh2->dev[dd_idx].page,
dd_idx           4342 drivers/md/raid5.c 			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
dd_idx           4343 drivers/md/raid5.c 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
dd_idx           5207 drivers/md/raid5.c 	int dd_idx;
dd_idx           5233 drivers/md/raid5.c 				     0, &dd_idx, NULL);
dd_idx           5237 drivers/md/raid5.c 	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
dd_idx           5240 drivers/md/raid5.c 		rdev = rcu_dereference(conf->disks[dd_idx].rdev);
dd_idx           5580 drivers/md/raid5.c 	int dd_idx;
dd_idx           5671 drivers/md/raid5.c 						  &dd_idx, NULL);
dd_idx           5712 drivers/md/raid5.c 			    !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
dd_idx           5772 drivers/md/raid5.c 	int dd_idx;
dd_idx           5957 drivers/md/raid5.c 				     1, &dd_idx, NULL);
dd_idx           5961 drivers/md/raid5.c 				     1, &dd_idx, NULL);
dd_idx           6129 drivers/md/raid5.c 	int dd_idx;
dd_idx           6137 drivers/md/raid5.c 				      0, &dd_idx, NULL);
dd_idx           6158 drivers/md/raid5.c 		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
dd_idx           6165 drivers/md/raid5.c 		set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
dd_idx            760 drivers/md/raid5.h 				     int previous, int *dd_idx,
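
The remaining hits are callers that pass &dd_idx into raid5_compute_sector() and then index sh->dev[] or conf->disks[] with the result (raid5_make_request, the aligned-read paths, reshape copy-out, and the raid5.h prototype). The raid5.c:2928/3026 pair is the self-check in raid5_compute_blocknr(): map a device slot back to a logical sector, rerun the forward mapping, and verify that dd_idx and the parity indices come back unchanged. A self-contained sketch of that round trip, assuming a plain RAID5 left-asymmetric layout where pd_idx = data_disks - (stripe % raid_disks):

#include <assert.h>

/* forward map for RAID5 left-asymmetric: logical sector -> device slot,
 * also returning the stripe number and parity slot */
static int map_forward(unsigned long long r_sector,
		       unsigned int sectors_per_chunk, int raid_disks,
		       unsigned long long *stripe, int *pd_idx)
{
	int data_disks = raid_disks - 1;
	unsigned long long chunk_number = r_sector / sectors_per_chunk;
	int dd_idx = (int)(chunk_number % data_disks);

	*stripe = chunk_number / data_disks;
	*pd_idx = data_disks - (int)(*stripe % raid_disks);
	if (dd_idx >= *pd_idx)		/* shift past the parity slot */
		dd_idx++;
	return dd_idx;
}

/* reverse map: (stripe, device slot) -> first logical sector of the chunk */
static unsigned long long map_back(unsigned long long stripe, int dd_idx,
				   int pd_idx, int raid_disks,
				   unsigned int sectors_per_chunk)
{
	int data_disks = raid_disks - 1;
	int raw_idx = dd_idx > pd_idx ? dd_idx - 1 : dd_idx;

	return (stripe * data_disks + raw_idx) * sectors_per_chunk;
}

/* consistency check in the spirit of raid5_compute_blocknr(): the mapping
 * must round-trip to the start of the same chunk */
static void check_round_trip(unsigned long long r_sector,
			     unsigned int sectors_per_chunk, int raid_disks)
{
	unsigned long long stripe;
	int pd_idx;
	int dd_idx = map_forward(r_sector, sectors_per_chunk, raid_disks,
				 &stripe, &pd_idx);

	assert(map_back(stripe, dd_idx, pd_idx, raid_disks, sectors_per_chunk)
	       == r_sector - r_sector % sectors_per_chunk);
}

For example, check_round_trip(123456, 128, 5) exercises a 4+1 array with 64 KiB chunks (128 sectors of 512 bytes).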