raid5.c: occurrences of dd_idx

 762  int dd_idx;  stripe_add_to_batch_list() local
 806  dd_idx = 0;  stripe_add_to_batch_list()
 807  while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)  stripe_add_to_batch_list()
 808  dd_idx++;  stripe_add_to_batch_list()
 809  if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)  stripe_add_to_batch_list()
2536  raid5_compute_sector(struct r5conf *conf, sector_t r_sector, int previous, int *dd_idx, struct stripe_head *sh)  raid5_compute_sector() argument
2537  int previous, int *dd_idx,  raid5_compute_sector()
2566  *dd_idx = sector_div(stripe, data_disks);  raid5_compute_sector()
2580  if (*dd_idx >= pd_idx)  raid5_compute_sector()
2581  (*dd_idx)++;  raid5_compute_sector()
2585  if (*dd_idx >= pd_idx)  raid5_compute_sector()
2586  (*dd_idx)++;  raid5_compute_sector()
2590  *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;  raid5_compute_sector()
2594  *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;  raid5_compute_sector()
2598  (*dd_idx)++;  raid5_compute_sector()
2614  (*dd_idx)++; /* Q D D D P */  raid5_compute_sector()
2616  } else if (*dd_idx >= pd_idx)  raid5_compute_sector()
2617  (*dd_idx) += 2; /* D D P Q D */  raid5_compute_sector()
2623  (*dd_idx)++; /* Q D D D P */  raid5_compute_sector()
2625  } else if (*dd_idx >= pd_idx)  raid5_compute_sector()
2626  (*dd_idx) += 2; /* D D P Q D */  raid5_compute_sector()
2631  *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;  raid5_compute_sector()
2636  *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;  raid5_compute_sector()
2642  (*dd_idx) += 2;  raid5_compute_sector()
2656  (*dd_idx)++; /* Q D D D P */  raid5_compute_sector()
2658  } else if (*dd_idx >= pd_idx)  raid5_compute_sector()
2659  (*dd_idx) += 2; /* D D P Q D */  raid5_compute_sector()
2672  (*dd_idx)++; /* Q D D D P */  raid5_compute_sector()
2674  } else if (*dd_idx >= pd_idx)  raid5_compute_sector()
2675  (*dd_idx) += 2; /* D D P Q D */  raid5_compute_sector()
2683  *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;  raid5_compute_sector()
2690  if (*dd_idx >= pd_idx)  raid5_compute_sector()
2691  (*dd_idx)++;  raid5_compute_sector()
2697  if (*dd_idx >= pd_idx)  raid5_compute_sector()
2698  (*dd_idx)++;  raid5_compute_sector()
2704  *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);  raid5_compute_sector()
2710  *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);  raid5_compute_sector()
2716  (*dd_idx)++;  raid5_compute_sector()
2751  int dummy1, dd_idx = i;  compute_blocknr() local
2849  if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx  compute_blocknr()
2953  static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,  add_stripe_bio() argument
2977  bip = &sh->dev[dd_idx].towrite;  add_stripe_bio()
2981  bip = &sh->dev[dd_idx].toread;  add_stripe_bio()
3001  sector_t sector = sh->dev[dd_idx].sector;  add_stripe_bio()
3002  for (bi=sh->dev[dd_idx].towrite;  add_stripe_bio()
3003  sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&  add_stripe_bio()
3005  bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {  add_stripe_bio()
3009  if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)  add_stripe_bio()
3010  if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))  add_stripe_bio()
3016  (unsigned long long)sh->sector, dd_idx);  add_stripe_bio()
3049  set_bit(R5_Overlap, &sh->dev[dd_idx].flags);  add_stripe_bio()
3061  int dd_idx;  stripe_set_idx() local
3069  &dd_idx, sh);  stripe_set_idx()
3945  int dd_idx, j;  handle_stripe_expansion() local
3951  &dd_idx, NULL);  handle_stripe_expansion()
3960  test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {  handle_stripe_expansion()
3968  tx = async_memcpy(sh2->dev[dd_idx].page,  handle_stripe_expansion()
3972  set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);  handle_stripe_expansion()
3973  set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);  handle_stripe_expansion()
4813  int dd_idx;  chunk_aligned_read() local
4839  0, &dd_idx, NULL);  chunk_aligned_read()
4843  rdev = rcu_dereference(conf->disks[dd_idx].replacement);  chunk_aligned_read()
4846  rdev = rcu_dereference(conf->disks[dd_idx].rdev);  chunk_aligned_read()
5151  int dd_idx;  make_request() local
5228  &dd_idx, NULL);  make_request()
5288  !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {  make_request()
5346  int dd_idx;  reshape_request() local
5513  1, &dd_idx, NULL);  reshape_request()
5517  1, &dd_idx, NULL);  reshape_request()
5669  * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.  retry_aligned_read()
5672  int dd_idx;  retry_aligned_read() local
5681  0, &dd_idx, NULL);  retry_aligned_read()
5702  if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {  retry_aligned_read()
5709  set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);  retry_aligned_read()
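The raid5_compute_sector() fragments above (2566, 2580-2594, 2614-2626) show the core of the layout arithmetic: sector_div() yields a raw data-disk index, which is then either bumped past the parity slot ("if (*dd_idx >= pd_idx) (*dd_idx)++;" in the asymmetric layouts) or rotated relative to it ("(pd_idx + 1 + *dd_idx) % raid_disks" in the symmetric ones); the RAID6 variants skip two slots when P and Q are adjacent. The following is a minimal user-space sketch of that mapping, not the kernel function: the demo_* names and enum labels are invented for illustration, and the RAID6 case assumes qd_idx == pd_idx + 1 (wrapping to 0), as the /* Q D D D P */ and /* D D P Q D */ comments imply.

#include <stdio.h>

/* Illustrative layout labels; not the kernel's ALGORITHM_* constants. */
enum demo_layout {
	DEMO_ASYMMETRIC,	/* fragments 2580-2586: skip past the parity disk */
	DEMO_SYMMETRIC,		/* fragments 2590-2594: rotate relative to parity */
	DEMO_RAID6_ASYMMETRIC	/* fragments 2614-2617: skip past P and Q */
};

/*
 * Map the raw data-disk index (the remainder from sector_div(), fragment
 * 2566) to a physical disk slot, given where parity landed for this stripe.
 */
static int demo_data_disk(int dd_idx, int pd_idx, int raid_disks,
			  enum demo_layout layout)
{
	switch (layout) {
	case DEMO_ASYMMETRIC:
		if (dd_idx >= pd_idx)
			dd_idx++;
		break;
	case DEMO_SYMMETRIC:
		dd_idx = (pd_idx + 1 + dd_idx) % raid_disks;
		break;
	case DEMO_RAID6_ASYMMETRIC:
		/* Assumption: Q follows P, so qd_idx == pd_idx + 1 (mod raid_disks). */
		if (pd_idx == raid_disks - 1)
			dd_idx++;	/* Q D D D P */
		else if (dd_idx >= pd_idx)
			dd_idx += 2;	/* D D P Q D */
		break;
	}
	return dd_idx;
}

int main(void)
{
	int raid_disks = 5, pd_idx = 2;

	for (int i = 0; i < 3; i++)
		printf("data %d -> asym %d, sym %d, raid6 %d\n", i,
		       demo_data_disk(i, pd_idx, raid_disks, DEMO_ASYMMETRIC),
		       demo_data_disk(i, pd_idx, raid_disks, DEMO_SYMMETRIC),
		       demo_data_disk(i, pd_idx, raid_disks, DEMO_RAID6_ASYMMETRIC));
	return 0;
}

With raid_disks = 5 and pd_idx = 2, the RAID6 case places data on disks 0, 1 and 4, matching the "D D P Q D" comment; the symmetric case numbers data disks starting just after parity (3, 4, 0).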
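Likewise, the add_stripe_bio() fragments 3001-3010 scan the bios already queued against a device and set R5_OVERWRITE when they cover the whole stripe unit, so the stripe can be written without reading old data first. Below is a simplified user-space model of that scan, under the assumption that the write list is sorted by start sector, as the kernel's towrite list walked by r5_next_bio() is; struct demo_range and DEMO_STRIPE_SECTORS are stand-ins for struct bio and STRIPE_SECTORS.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_STRIPE_SECTORS 8	/* stand-in for the kernel's STRIPE_SECTORS */

/* A queued write, [start, end) in sectors; models one towrite bio. */
struct demo_range {
	unsigned long start;
	unsigned long end;
};

/*
 * Mirror of the scan in fragments 3001-3010: advance a cursor through the
 * write list and report whether the stripe unit at 'base' is fully covered.
 */
static bool demo_covers_stripe(const struct demo_range *w, int n,
			       unsigned long base)
{
	unsigned long sector = base;
	int i;

	for (i = 0; i < n && sector < base + DEMO_STRIPE_SECTORS &&
		    w[i].start <= sector; i++)
		if (w[i].end > sector)	/* this write extends the covered prefix */
			sector = w[i].end;

	/* Fragment 3009: full coverage lets R5_OVERWRITE be set (fragment 3010). */
	return sector >= base + DEMO_STRIPE_SECTORS;
}

int main(void)
{
	struct demo_range writes[] = { { 100, 104 }, { 104, 108 } };

	printf("full overwrite: %s\n",
	       demo_covers_stripe(writes, 2, 100) ? "yes" : "no");
	return 0;
}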