/linux-4.1.27/drivers/md/

raid5.c
  raid6_idx_to_slot():
    217    if (idx == sh->pd_idx)
  stripe_add_to_batch_list():
    766    /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
    807    while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
  set_syndrome_sources():
    1354   if (i == sh->qd_idx || i == sh->pd_idx ||
  ops_run_prexor5():
    1547   int count = 0, pd_idx = sh->pd_idx, i;  (local)
    1551   struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
  ops_complete_reconstruct():
    1658   int pd_idx = sh->pd_idx;  (local)
    1675   if (dev->written || i == pd_idx || i == qd_idx) {
  ops_run_reconstruct5():
    1705   int count, pd_idx = sh->pd_idx, i;  (local)
    1717   if (pd_idx == i)
    1724   set_bit(R5_Discard, &sh->dev[pd_idx].flags);
    1736   xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
    1743   xor_dest = sh->dev[pd_idx].page;
    1746   if (i != pd_idx)
  ops_run_reconstruct6():
    1799   if (sh->pd_idx == i || sh->qd_idx == i)
    1806   set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
  ops_run_check_p():
    1859   int pd_idx = sh->pd_idx;  (local)
    1873   xor_dest = sh->dev[pd_idx].page;
    1876   if (i == pd_idx || i == qd_idx)
  raid5_compute_sector():
    2543   int pd_idx, qd_idx;  (local)
    2571   pd_idx = qd_idx = -1;
    2574   pd_idx = data_disks;
    2579   pd_idx = data_disks - sector_div(stripe2, raid_disks);
    2580   if (*dd_idx >= pd_idx)
    2584   pd_idx = sector_div(stripe2, raid_disks);
    2585   if (*dd_idx >= pd_idx)
    2589   pd_idx = data_disks - sector_div(stripe2, raid_disks);
    2590   *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
    2593   pd_idx = sector_div(stripe2, raid_disks);
    2594   *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
    2597   pd_idx = 0;
    2601   pd_idx = data_disks;
    2611   pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
    2612   qd_idx = pd_idx + 1;
    2613   if (pd_idx == raid_disks-1) {
    2616   } else if (*dd_idx >= pd_idx)
    2620   pd_idx = sector_div(stripe2, raid_disks);
    2621   qd_idx = pd_idx + 1;
    2622   if (pd_idx == raid_disks-1) {
    2625   } else if (*dd_idx >= pd_idx)
    2629   pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
    2630   qd_idx = (pd_idx + 1) % raid_disks;
    2631   *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
    2634   pd_idx = sector_div(stripe2, raid_disks);
    2635   qd_idx = (pd_idx + 1) % raid_disks;
    2636   *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
    2640   pd_idx = 0;
    2645   pd_idx = data_disks;
    2653   pd_idx = sector_div(stripe2, raid_disks);
    2654   qd_idx = pd_idx + 1;
    2655   if (pd_idx == raid_disks-1) {
    2658   } else if (*dd_idx >= pd_idx)
    2669   pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
    2670   qd_idx = pd_idx + 1;
    2671   if (pd_idx == raid_disks-1) {
    2674   } else if (*dd_idx >= pd_idx)
    2681   pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
    2682   qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
    2683   *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
    2689   pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
    2690   if (*dd_idx >= pd_idx)
    2696   pd_idx = sector_div(stripe2, raid_disks-1);
    2697   if (*dd_idx >= pd_idx)
    2703   pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
    2704   *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
    2709   pd_idx = sector_div(stripe2, raid_disks-1);
    2710   *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
    2715   pd_idx = 0;
    2727   sh->pd_idx = pd_idx;
  compute_blocknr():
    2758   if (i == sh->pd_idx)
    2766   if (i > sh->pd_idx)
    2771   if (i < sh->pd_idx)
    2773   i -= (sh->pd_idx + 1);
    2792   if (sh->pd_idx == raid_disks-1)
    2794   else if (i > sh->pd_idx)
    2799   if (sh->pd_idx == raid_disks-1)
    2803   if (i < sh->pd_idx)
    2805   i -= (sh->pd_idx + 2);
    2815   if (sh->pd_idx == 0)
    2819   if (i < sh->pd_idx)
    2821   i -= (sh->pd_idx + 1);
    2826   if (i > sh->pd_idx)
    2831   if (i < sh->pd_idx)
    2833   i -= (sh->pd_idx + 1);
    2849   if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
  schedule_reconstruction():
    2862   int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;  (local)
    2898   BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
    2899   test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
    2906   if (i == pd_idx || i == qd_idx)
    2930   set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
    2931   clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
  handle_failed_sync():
    3196   if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
  need_this_block():
    3337   if (s->failed_num[i] != sh->pd_idx &&
  handle_stripe_clean_event():
    3508   test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
    3510   clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
    3511   clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
  handle_stripe_dirtying():
    3577   if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
    3588   i != sh->pd_idx && i != sh->qd_idx &&
    3609   if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
    3635   i != sh->pd_idx && i != sh->qd_idx &&
  handle_parity_checks5():
    3696   clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
    3705   dev = &sh->dev[sh->pd_idx];
    3752   &sh->dev[sh->pd_idx].flags);
    3753   sh->ops.target = sh->pd_idx;
  handle_parity_checks6():
    3773   int pd_idx = sh->pd_idx;  (local)
    3813   clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
    3852   dev = &sh->dev[pd_idx];
    3909   &sh->dev[pd_idx].flags);
    3910   *target = pd_idx;
  handle_stripe_expansion():
    3944   if (i != sh->pd_idx && i != sh->qd_idx) {
    3975   if (j != sh2->pd_idx &&
  handle_stripe():
    4321   "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
    4323   atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
    4379   BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
    4380   !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
    4387   (i == sh->pd_idx || i == sh->qd_idx ||
    4396   ((i == sh->pd_idx || i == sh->qd_idx) &&
    4409   pdev = &sh->dev[sh->pd_idx];
    4410   s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
    4411   || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
    4483   if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
  make_discard_request():
    5091   set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
    5097   clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
    5100   if (d == sh->pd_idx || d == sh->qd_idx)
    5114   if (d == sh->pd_idx || d == sh->qd_idx)
  reshape_request():
    5480   if (j == sh->pd_idx)
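
Most of the raid5.c hits trace back to raid5_compute_sector(), which picks the parity disk for each stripe and rotates the data disks around it. As a rough guide to that arithmetic, here is a minimal user-space sketch of the ALGORITHM_LEFT_SYMMETRIC case visible at lines 2589-2590; left_symmetric() and its simplified math are illustrative only (plain / and % stand in for the kernel's sector_div(), and the real function also handles chunk offsets, reshape geometry, and the other layouts listed above):

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Sketch of the left-symmetric RAID5 layout: find the stripe that
     * holds a logical sector, then derive the parity disk (pd_idx) and
     * the data disk (dd_idx) for that stripe.
     */
    static void left_symmetric(uint64_t logical_sector, int raid_disks,
                               int sectors_per_chunk, int *pd_idx, int *dd_idx)
    {
        int data_disks = raid_disks - 1;
        uint64_t chunk_number = logical_sector / sectors_per_chunk;
        int raw_dd = (int)(chunk_number % data_disks); /* pre-rotation data slot */
        uint64_t stripe = chunk_number / data_disks;

        /* Parity walks backwards one disk per stripe ("left"). */
        *pd_idx = data_disks - (int)(stripe % raid_disks);
        /* Data slots start just after parity and wrap ("symmetric"). */
        *dd_idx = (*pd_idx + 1 + raw_dd) % raid_disks;
    }

    int main(void)
    {
        int pd, dd;

        /* 5-disk array, 512 KiB chunks (1024 sectors of 512 bytes). */
        left_symmetric(123456789ULL, 5, 1024, &pd, &dd);
        printf("pd_idx=%d dd_idx=%d\n", pd, dd);
        return 0;
    }

compute_blocknr(), also listed above, is the inverse mapping back to a logical sector, which is why it repeats the same per-layout case analysis on sh->pd_idx.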

raid5.h
  205    short pd_idx;  /* parity disk index */  (member of struct stripe_head)
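
This member is what nearly every raid5.c hit dereferences. A condensed, illustrative view of the relevant fields (the real struct stripe_head carries many more members), together with the skip-parity loop shape that recurs throughout the hits, e.g. at lines 2906 and 3944:

    #include <stdio.h>

    struct stripe_head_sketch {
        int disks;     /* devices in this stripe */
        short pd_idx;  /* parity disk index */
        short qd_idx;  /* 'Q' syndrome disk index (RAID-6), or -1 */
    };

    /* Touch only data devices; the P and Q slots hold no user data. */
    static int count_data_devices(const struct stripe_head_sketch *sh)
    {
        int i, n = 0;

        for (i = 0; i < sh->disks; i++) {
            if (i == sh->pd_idx || i == sh->qd_idx)
                continue;
            n++;
        }
        return n;
    }

    int main(void)
    {
        struct stripe_head_sketch sh = { .disks = 6, .pd_idx = 4, .qd_idx = 5 };

        printf("data devices: %d\n", count_data_devices(&sh));
        return 0;
    }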

/linux-4.1.27/drivers/net/ethernet/intel/i40e/

i40e_hmc.h
  159    * @pd_idx: page descriptor index
  161    #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
  164    ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
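
The macro packs a segment-descriptor index and a page-descriptor index into a single PFHMC_PDINV register write. A user-space sketch of just that bit-packing follows; the shift values are assumptions for illustration (the driver takes the real I40E_PFHMC_PDINV_* constants from i40e_register.h), and wr32() is replaced by returning the value:

    #include <stdint.h>
    #include <stdio.h>

    #define PMSDIDX_SHIFT 0   /* assumed: sd index in the low bits  */
    #define PMPDIDX_SHIFT 16  /* assumed: pd index in the high half */

    /* Combine both indexes into one invalidation-register value. */
    static uint32_t pdinv_value(uint32_t sd_idx, uint32_t pd_idx)
    {
        return (sd_idx << PMSDIDX_SHIFT) | (pd_idx << PMPDIDX_SHIFT);
    }

    int main(void)
    {
        /* Invalidate page descriptor 5 inside segment descriptor 2. */
        printf("PFHMC_PDINV = 0x%08x\n", pdinv_value(2, 5));
        return 0;
    }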

i40e_lan_hmc.c
  i40e_create_lan_hmc_object():
    307    u32 pd_idx = 0, pd_lmt = 0;  (local)
    354    info->start_idx, info->count, &pd_idx,
    382    /* find pd_idx and pd_lmt in this sd */
    383    pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
    431    pd_idx1 = max(pd_idx,
  i40e_delete_lan_hmc_object():
    551    u32 pd_idx, pd_lmt, rel_pd_idx;  (local)
    598    info->start_idx, info->count, &pd_idx,
    601    for (j = pd_idx; j < pd_lmt; j++) {
  i40e_hmc_get_object_va():
    1008   u32 pd_idx, pd_lmt, rel_pd_idx;  (local)
    1048   &pd_idx, &pd_lmt);
    1049   rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
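
The arithmetic visible at line 1049 reflects how HMC page descriptors are grouped: I40E_HMC_PD_CNT_IN_SD of them per segment descriptor, so a global pd_idx splits into the owning SD and an index relative to it. A minimal sketch of that split, with the group size treated as an assumed constant for the demo:

    #include <stdio.h>
    #include <stdint.h>

    #define I40E_HMC_PD_CNT_IN_SD 512  /* assumed value for the demo */

    int main(void)
    {
        uint32_t pd_idx = 1300;                                /* example global index */
        uint32_t sd_idx = pd_idx / I40E_HMC_PD_CNT_IN_SD;      /* owning SD */
        uint32_t rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;  /* index within that SD */

        printf("pd_idx %u -> sd_idx %u, rel_pd_idx %u\n",
               pd_idx, sd_idx, rel_pd_idx);
        return 0;
    }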

/linux-4.1.27/drivers/net/ethernet/intel/i40evf/

i40e_hmc.h
  159    * @pd_idx: page descriptor index
  161    #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
  164    ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))

/linux-4.1.27/drivers/infiniband/hw/ocrdma/

ocrdma_verbs.c
  ocrdma_get_pd_num():
    327    u16 pd_idx = 0;  (local)
    334    pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
    335    pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
    336    pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
    339    pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
    340    pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
    347    pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
    348    pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
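
Here pd_idx is a slot handed out by a protection-domain bitmap allocator, and the hardware PD id is that slot plus the pool's start offset. A self-contained sketch of the pattern; the bitmap helper, pool size, and base id below are illustrative stand-ins, not the driver's actual implementation:

    #include <stdint.h>
    #include <stdio.h>

    #define PD_POOL_SIZE 64

    static uint8_t pd_bitmap[PD_POOL_SIZE / 8];

    /* Return the first free slot and mark it used, or -1 if exhausted. */
    static int pd_bitmap_alloc(void)
    {
        for (int i = 0; i < PD_POOL_SIZE; i++) {
            if (!(pd_bitmap[i / 8] & (1u << (i % 8)))) {
                pd_bitmap[i / 8] |= 1u << (i % 8);
                return i;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint16_t pd_norm_start = 128;  /* assumed pool base id */
        int pd_idx = pd_bitmap_alloc();

        if (pd_idx >= 0)
            printf("pd->id = %u\n", pd_norm_start + pd_idx);
        return 0;
    }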