pl 192 arch/arm/include/asm/assembler.h .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
pl 449 arch/arm/include/asm/assembler.h .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
pl 519 arch/ia64/include/asm/pal.h pl : 2, /* privilege level */
pl 556 arch/ia64/include/asm/pal.h pl : 2, /* privilege level */
pl 592 arch/ia64/include/asm/pal.h pl : 2, /* privilege level */
pl 622 arch/ia64/include/asm/pal.h pl : 2, /* privilege level */
pl 652 arch/ia64/include/asm/pal.h pl : 2, /* privilege level */
pl 737 arch/ia64/kernel/palinfo.c unsigned long pl:2;
pl 800 arch/ia64/kernel/palinfo.c gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
pl 192 arch/mips/kernel/pm-cps.c static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
pl 215 arch/mips/kernel/pm-cps.c uasm_build_label(pl, *pp, lbl);
pl 236 arch/mips/kernel/pm-cps.c static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
pl 292 arch/mips/kernel/pm-cps.c uasm_build_label(pl, *pp, lbl);
pl 329 arch/mips/kernel/pm-cps.c static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
pl 334 arch/mips/kernel/pm-cps.c uasm_build_label(pl, *pp, lbl);
pl 265 arch/powerpc/include/asm/sfp-machine.h #define umul_ppmm(ph, pl, m0, m1) \
pl 269 arch/powerpc/include/asm/sfp-machine.h (pl) = __m0 * __m1; \
pl 42 drivers/acpi/acpica/acdebug.h #define PARAM_LIST(pl) pl
pl 1176 drivers/ata/pata_legacy.c struct legacy_probe *pl = &probe_list[0];
pl 1221 drivers/ata/pata_legacy.c for (i = 0; i < NR_HOST; i++, pl++) {
pl 1222 drivers/ata/pata_legacy.c if (pl->port == 0)
pl 1224 drivers/ata/pata_legacy.c if (pl->type == UNKNOWN)
pl 1225 drivers/ata/pata_legacy.c pl->type = probe_chip_type(pl);
pl 1226 drivers/ata/pata_legacy.c pl->slot = slot++;
pl 1227 drivers/ata/pata_legacy.c if (legacy_init_one(pl) == 0)
pl 2467 drivers/atm/nicstar.c pool_levels pl;
pl 2475 drivers/atm/nicstar.c (pl.buftype, &((pool_levels __user *) arg)->buftype))
pl 2477 drivers/atm/nicstar.c switch (pl.buftype) {
pl 2479 drivers/atm/nicstar.c pl.count =
pl 2481 drivers/atm/nicstar.c pl.level.min = card->sbnr.min;
pl 2482 drivers/atm/nicstar.c pl.level.init = card->sbnr.init;
pl 2483 drivers/atm/nicstar.c pl.level.max = card->sbnr.max;
pl 2487 drivers/atm/nicstar.c pl.count =
pl 2489 drivers/atm/nicstar.c pl.level.min = card->lbnr.min;
pl 2490 drivers/atm/nicstar.c pl.level.init = card->lbnr.init;
pl 2491 drivers/atm/nicstar.c pl.level.max = card->lbnr.max;
pl 2495 drivers/atm/nicstar.c pl.count = card->hbpool.count;
pl 2496 drivers/atm/nicstar.c pl.level.min = card->hbnr.min;
pl 2497 drivers/atm/nicstar.c pl.level.init = card->hbnr.init;
pl 2498 drivers/atm/nicstar.c pl.level.max = card->hbnr.max;
pl 2502 drivers/atm/nicstar.c pl.count = card->iovpool.count;
pl 2503 drivers/atm/nicstar.c pl.level.min = card->iovnr.min;
pl 2504 drivers/atm/nicstar.c pl.level.init = card->iovnr.init;
pl 2505 drivers/atm/nicstar.c pl.level.max = card->iovnr.max;
pl 2512 drivers/atm/nicstar.c if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
pl 2513 drivers/atm/nicstar.c return (sizeof(pl));
pl 2520 drivers/atm/nicstar.c if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
pl 2522 drivers/atm/nicstar.c if (pl.level.min >= pl.level.init
pl 2523 drivers/atm/nicstar.c || pl.level.init >= pl.level.max)
pl 2525 drivers/atm/nicstar.c if (pl.level.min == 0)
pl 2527 drivers/atm/nicstar.c switch (pl.buftype) {
pl 2529 drivers/atm/nicstar.c if (pl.level.max > TOP_SB)
pl 2531 drivers/atm/nicstar.c card->sbnr.min = pl.level.min;
pl 2532 drivers/atm/nicstar.c card->sbnr.init = pl.level.init;
pl 2533 drivers/atm/nicstar.c card->sbnr.max = pl.level.max;
pl 2537 drivers/atm/nicstar.c if (pl.level.max > TOP_LB)
pl 2539 drivers/atm/nicstar.c card->lbnr.min = pl.level.min;
pl 2540 drivers/atm/nicstar.c card->lbnr.init = pl.level.init;
pl 2541 drivers/atm/nicstar.c card->lbnr.max = pl.level.max;
pl 2545 drivers/atm/nicstar.c if (pl.level.max > TOP_HB)
pl 2547 drivers/atm/nicstar.c card->hbnr.min = pl.level.min;
pl 2548 drivers/atm/nicstar.c card->hbnr.init = pl.level.init;
pl 2549 drivers/atm/nicstar.c card->hbnr.max = pl.level.max;
pl 2553 drivers/atm/nicstar.c if (pl.level.max > TOP_IOVB)
pl 2555 drivers/atm/nicstar.c card->iovnr.min = pl.level.min;
pl 2556 drivers/atm/nicstar.c card->iovnr.init = pl.level.init;
pl 2557 drivers/atm/nicstar.c card->iovnr.max = pl.level.max;
pl 2685 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct kv_pl *pl = &ps->levels[index];
pl 2690 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pl->sclk = sclk;
pl 2691 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pl->vddc_index = clock_info->sumo.vddcIndex;
pl 2696 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pl->ds_divider_index = 5;
pl 2697 drivers/gpu/drm/amd/amdgpu/kv_dpm.c pl->ss_divider_index = 5;
pl 2903 drivers/gpu/drm/amd/amdgpu/kv_dpm.c struct kv_pl *pl = &ps->levels[i];
pl 2905 drivers/gpu/drm/amd/amdgpu/kv_dpm.c i, pl->sclk,
pl 2906 drivers/gpu/drm/amd/amdgpu/kv_dpm.c kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
pl 1847 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct rv7xx_pl *pl,
pl 3289 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct rv7xx_pl *pl)
pl 3292 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((pl->mclk == 0) || (pl->sclk == 0))
pl 3295 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (pl->mclk == pl->sclk)
pl 3298 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (pl->mclk > pl->sclk) {
pl 3299 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
pl 3300 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->sclk = btc_get_valid_sclk(adev,
pl 3302 drivers/gpu/drm/amd/amdgpu/si_dpm.c (pl->mclk +
pl 3306 drivers/gpu/drm/amd/amdgpu/si_dpm.c if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
pl 3307 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->mclk = btc_get_valid_mclk(adev,
pl 3309 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->sclk -
pl 4758 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct rv7xx_pl *pl,
pl 4766 drivers/gpu/drm/amd/amdgpu/si_dpm.c (u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
pl 4769 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->sclk,
pl 4770 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->mclk);
pl 5102 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_convert_power_level_to_smc(adev, &ulv->pl,
pl 5131 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
pl 5221 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (ulv->supported && ulv->pl.vddc) {
pl 5440 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct rv7xx_pl *pl,
pl 5455 drivers/gpu/drm/amd/amdgpu/si_dpm.c level->gen2PCIE = (u8)pl->pcie_gen;
pl 5457 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
pl 5464 drivers/gpu/drm/amd/amdgpu/si_dpm.c (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
pl 5475 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (pl->mclk > pi->mclk_edc_enable_threshold)
pl 5478 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
pl 5481 drivers/gpu/drm/amd/amdgpu/si_dpm.c level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
pl 5484 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
pl 5494 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->mclk);
pl 5500 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->sclk,
pl 5501 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->mclk,
pl 5509 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->vddc, &level->vddc);
pl 5525 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->vddci, &level->vddci);
pl 5533 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->vddc,
pl 5534 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->sclk,
pl 5535 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->mclk,
pl 5543 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
pl 5617 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (state->performance_levels[0].mclk != ulv->pl.mclk)
pl 5625 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (ulv->pl.vddc <
pl 5760 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (ulv->supported && ulv->pl.vddc) {
pl 6072 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct rv7xx_pl *pl,
pl 6079 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
pl 6127 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (ulv->supported && ulv->pl.vddc != 0)
pl 6128 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
pl 7141 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct rv7xx_pl *pl = &ps->performance_levels[index];
pl 7146 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
pl 7147 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
pl 7148 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
pl 7149 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
pl 7151 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
pl 7152 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
pl 7153 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->flags = le32_to_cpu(clock_info->si.ulFlags);
pl 7154 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
pl 7160 drivers/gpu/drm/amd/amdgpu/si_dpm.c ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
pl 7163 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->vddc = leakage_voltage;
pl 7166 drivers/gpu/drm/amd/amdgpu/si_dpm.c pi->acpi_vddc = pl->vddc;
pl 7167 drivers/gpu/drm/amd/amdgpu/si_dpm.c eg_pi->acpi_vddci = pl->vddci;
pl 7168 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_pi->acpi_pcie_gen = pl->pcie_gen;
pl 7175 drivers/gpu/drm/amd/amdgpu/si_dpm.c si_pi->ulv.pl = *pl;
pl 7182 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (pi->min_vddc_in_table > pl->vddc)
pl 7183 drivers/gpu/drm/amd/amdgpu/si_dpm.c pi->min_vddc_in_table = pl->vddc;
pl 7185 drivers/gpu/drm/amd/amdgpu/si_dpm.c if (pi->max_vddc_in_table < pl->vddc)
pl 7186 drivers/gpu/drm/amd/amdgpu/si_dpm.c pi->max_vddc_in_table = pl->vddc;
pl 7192 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->mclk = adev->clock.default_mclk;
pl 7193 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->sclk = adev->clock.default_sclk;
pl 7194 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->vddc = vddc;
pl 7195 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl->vddci = vddci;
pl 7201 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
pl 7202 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
pl 7203 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
pl 7204 drivers/gpu/drm/amd/amdgpu/si_dpm.c adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
pl 7489 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct rv7xx_pl *pl;
pl 7497 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl = &ps->performance_levels[current_index];
pl 7500 drivers/gpu/drm/amd/amdgpu/si_dpm.c current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
pl 7900 drivers/gpu/drm/amd/amdgpu/si_dpm.c struct rv7xx_pl *pl;
pl 7907 drivers/gpu/drm/amd/amdgpu/si_dpm.c pl = &ps->performance_levels[i];
pl 7910 drivers/gpu/drm/amd/amdgpu/si_dpm.c i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
pl 7913 drivers/gpu/drm/amd/amdgpu/si_dpm.c i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
pl 331 drivers/gpu/drm/amd/amdgpu/si_dpm.h struct rv7xx_pl *pl;
pl 954 drivers/gpu/drm/amd/amdgpu/si_dpm.h struct rv7xx_pl pl;
pl 370 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c const struct phm_phase_shedding_limits_table *pl,
pl 378 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (i = 0; i < pl->count; i++) {
pl 379 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (sclk < pl->entries[i].Sclk) {
pl 1155 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
pl 1162 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c for (i = 0; i < pl->count; i++) {
pl 1163 drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c if (memory_clock < pl->entries[i].Mclk) {
pl 874 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c const struct phm_phase_shedding_limits_table *pl,
pl 882 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (i = 0; i < pl->count; i++) {
pl 883 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (sclk < pl->entries[i].Sclk) {
pl 1210 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
pl 1217 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c for (i = 0; i < pl->count; i++) {
pl 1218 drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c if (memory_clock < pl->entries[i].Mclk) {
pl 420 drivers/gpu/drm/drm_gem_vram_helper.c struct ttm_placement *pl)
pl 430 drivers/gpu/drm/drm_gem_vram_helper.c *pl = gbo->placement;
pl 478 drivers/gpu/drm/i915/gt/intel_lrc.c struct list_head *uninitialized_var(pl);
pl 505 drivers/gpu/drm/i915/gt/intel_lrc.c pl = i915_sched_lookup_priolist(engine, prio);
pl 509 drivers/gpu/drm/i915/gt/intel_lrc.c list_move(&rq->sched.link, pl);
pl 941 drivers/gpu/drm/i915/gt/intel_lrc.c static void defer_request(struct i915_request *rq, struct list_head * const pl)
pl 956 drivers/gpu/drm/i915/gt/intel_lrc.c list_move_tail(&rq->sched.link, pl);
pl 28 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c #define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
pl 29 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
pl 31 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c #define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
pl 32 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
pl 342 drivers/gpu/drm/nouveau/nouveau_bo.c set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
pl 347 drivers/gpu/drm/nouveau/nouveau_bo.c pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
pl 349 drivers/gpu/drm/nouveau/nouveau_bo.c pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
pl 351 drivers/gpu/drm/nouveau/nouveau_bo.c pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
pl 391 drivers/gpu/drm/nouveau/nouveau_bo.c struct ttm_placement *pl = &nvbo->placement;
pl 396 drivers/gpu/drm/nouveau/nouveau_bo.c pl->placement = nvbo->placements;
pl 397 drivers/gpu/drm/nouveau/nouveau_bo.c set_placement_list(nvbo->placements, &pl->num_placement,
pl 400 drivers/gpu/drm/nouveau/nouveau_bo.c pl->busy_placement = nvbo->busy_placements;
pl 401 drivers/gpu/drm/nouveau/nouveau_bo.c set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
pl 720 drivers/gpu/drm/nouveau/nouveau_bo.c nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
pl 734 drivers/gpu/drm/nouveau/nouveau_bo.c *pl = nvbo->placement;
pl 36 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c static u32 pl_to_div(u32 pl)
pl 38 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c if (pl >= ARRAY_SIZE(_pl_to_div))
pl 41 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c return _pl_to_div[pl];
pl 46 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c u32 pl;
pl 48 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c for (pl = 0; pl < ARRAY_SIZE(_pl_to_div) - 1; pl++) {
pl 49 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c if (_pl_to_div[pl] >= div)
pl 50 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c return pl;
pl 73 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
pl 84 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
pl 95 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c divider = pll->m * clk->pl_to_div(pll->pl);
pl 111 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c u32 pl;
pl 139 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c for (pl = low_pl; pl <= high_pl; pl++) {
pl 142 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c target_vco_f = target_clk_f * clk->pl_to_div(pl);
pl 171 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c lwv = (vco_f + (clk->pl_to_div(pl) / 2))
pl 172 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c / clk->pl_to_div(pl);
pl 179 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c best_pl = pl;
pl 199 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c pll->pl = best_pl;
pl 205 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c target_freq / KHZ, pll->m, pll->n, pll->pl,
pl 206 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c clk->pl_to_div(pll->pl));
pl 344 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
pl 113 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h u32 pl;
pl 141 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c static u32 pl_to_div(u32 pl)
pl 143 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c return pl;
pl 389 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c u32 old = cur_pll.base.pl;
pl 390 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c u32 new = pll->pl;
pl 399 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
pl 404 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c cur_pll.base.pl = new;
pl 442 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
pl 505 drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
pl 1272 drivers/gpu/drm/radeon/btc_dpm.c struct rv7xx_pl *pl)
pl 1275 drivers/gpu/drm/radeon/btc_dpm.c if ((pl->mclk == 0) || (pl->sclk == 0))
pl 1278 drivers/gpu/drm/radeon/btc_dpm.c if (pl->mclk == pl->sclk)
pl 1281 drivers/gpu/drm/radeon/btc_dpm.c if (pl->mclk > pl->sclk) {
pl 1282 drivers/gpu/drm/radeon/btc_dpm.c if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > rdev->pm.dpm.dyn_state.mclk_sclk_ratio)
pl 1283 drivers/gpu/drm/radeon/btc_dpm.c pl->sclk = btc_get_valid_sclk(rdev,
pl 1285 drivers/gpu/drm/radeon/btc_dpm.c (pl->mclk +
pl 1289 drivers/gpu/drm/radeon/btc_dpm.c if ((pl->sclk - pl->mclk) > rdev->pm.dpm.dyn_state.sclk_mclk_delta)
pl 1290 drivers/gpu/drm/radeon/btc_dpm.c pl->mclk = btc_get_valid_mclk(rdev,
pl 1292 drivers/gpu/drm/radeon/btc_dpm.c pl->sclk -
pl 1403 drivers/gpu/drm/radeon/btc_dpm.c struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
pl 1797 drivers/gpu/drm/radeon/btc_dpm.c struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
pl 1815 drivers/gpu/drm/radeon/btc_dpm.c struct rv7xx_pl *ulv_pl = eg_pi->ulv.pl;
pl 2742 drivers/gpu/drm/radeon/btc_dpm.c struct rv7xx_pl *pl;
pl 2751 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->low;
pl 2753 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->medium;
pl 2755 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->high;
pl 2758 drivers/gpu/drm/radeon/btc_dpm.c current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
pl 2767 drivers/gpu/drm/radeon/btc_dpm.c struct rv7xx_pl *pl;
pl 2776 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->low;
pl 2778 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->medium;
pl 2780 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->high;
pl 2781 drivers/gpu/drm/radeon/btc_dpm.c return pl->sclk;
pl 2790 drivers/gpu/drm/radeon/btc_dpm.c struct rv7xx_pl *pl;
pl 2799 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->low;
pl 2801 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->medium;
pl 2803 drivers/gpu/drm/radeon/btc_dpm.c pl = &ps->high;
pl 2804 drivers/gpu/drm/radeon/btc_dpm.c return pl->mclk;
pl 49 drivers/gpu/drm/radeon/btc_dpm.h struct rv7xx_pl *pl);
pl 5480 drivers/gpu/drm/radeon/ci_dpm.c struct ci_pl *pl = &ps->performance_levels[index];
pl 5484 drivers/gpu/drm/radeon/ci_dpm.c pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
pl 5485 drivers/gpu/drm/radeon/ci_dpm.c pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
pl 5486 drivers/gpu/drm/radeon/ci_dpm.c pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
pl 5487 drivers/gpu/drm/radeon/ci_dpm.c pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
pl 5489 drivers/gpu/drm/radeon/ci_dpm.c pl->pcie_gen = r600_get_pcie_gen_support(rdev,
pl 5493 drivers/gpu/drm/radeon/ci_dpm.c pl->pcie_lane = r600_get_pcie_lane_support(rdev,
pl 5498 drivers/gpu/drm/radeon/ci_dpm.c pi->acpi_pcie_gen = pl->pcie_gen;
pl 5503 drivers/gpu/drm/radeon/ci_dpm.c pi->ulv.pl = *pl;
pl 5509 drivers/gpu/drm/radeon/ci_dpm.c pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
pl 5510 drivers/gpu/drm/radeon/ci_dpm.c pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
pl 5511 drivers/gpu/drm/radeon/ci_dpm.c pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
pl 5512 drivers/gpu/drm/radeon/ci_dpm.c pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
pl 5518 drivers/gpu/drm/radeon/ci_dpm.c if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
pl 5519 drivers/gpu/drm/radeon/ci_dpm.c pi->pcie_gen_powersaving.max = pl->pcie_gen;
pl 5520 drivers/gpu/drm/radeon/ci_dpm.c if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
pl 5521 drivers/gpu/drm/radeon/ci_dpm.c pi->pcie_gen_powersaving.min = pl->pcie_gen;
pl 5522 drivers/gpu/drm/radeon/ci_dpm.c if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
pl 5523 drivers/gpu/drm/radeon/ci_dpm.c pi->pcie_lane_powersaving.max = pl->pcie_lane;
pl 5524 drivers/gpu/drm/radeon/ci_dpm.c if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
pl 5525 drivers/gpu/drm/radeon/ci_dpm.c pi->pcie_lane_powersaving.min = pl->pcie_lane;
pl 5529 drivers/gpu/drm/radeon/ci_dpm.c if (pi->pcie_gen_performance.max < pl->pcie_gen)
pl 5530 drivers/gpu/drm/radeon/ci_dpm.c pi->pcie_gen_performance.max = pl->pcie_gen;
pl 5531 drivers/gpu/drm/radeon/ci_dpm.c if (pi->pcie_gen_performance.min > pl->pcie_gen)
pl 5532 drivers/gpu/drm/radeon/ci_dpm.c pi->pcie_gen_performance.min = pl->pcie_gen;
pl 5533 drivers/gpu/drm/radeon/ci_dpm.c if (pi->pcie_lane_performance.max < pl->pcie_lane)
pl 5534 drivers/gpu/drm/radeon/ci_dpm.c pi->pcie_lane_performance.max = pl->pcie_lane;
pl 5535 drivers/gpu/drm/radeon/ci_dpm.c if (pi->pcie_lane_performance.min > pl->pcie_lane)
pl 5536 drivers/gpu/drm/radeon/ci_dpm.c pi->pcie_lane_performance.min = pl->pcie_lane;
pl 5960 drivers/gpu/drm/radeon/ci_dpm.c struct ci_pl *pl;
pl 5967 drivers/gpu/drm/radeon/ci_dpm.c pl = &ps->performance_levels[i];
pl 5969 drivers/gpu/drm/radeon/ci_dpm.c i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
pl 95 drivers/gpu/drm/radeon/ci_dpm.h struct ci_pl pl;
pl 677 drivers/gpu/drm/radeon/cypress_dpm.c struct rv7xx_pl *pl,
pl 687 drivers/gpu/drm/radeon/cypress_dpm.c ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
pl 688 drivers/gpu/drm/radeon/cypress_dpm.c level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
pl 689 drivers/gpu/drm/radeon/cypress_dpm.c level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
pl 692 drivers/gpu/drm/radeon/cypress_dpm.c ret = rv740_populate_sclk_value(rdev, pl->sclk, &level->sclk);
pl 698 drivers/gpu/drm/radeon/cypress_dpm.c (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
pl 708 drivers/gpu/drm/radeon/cypress_dpm.c if (pl->mclk > pi->mclk_edc_enable_threshold)
pl 711 drivers/gpu/drm/radeon/cypress_dpm.c if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
pl 714 drivers/gpu/drm/radeon/cypress_dpm.c level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
pl 717 drivers/gpu/drm/radeon/cypress_dpm.c if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
pl 726 drivers/gpu/drm/radeon/cypress_dpm.c pl->sclk,
pl 727 drivers/gpu/drm/radeon/cypress_dpm.c pl->mclk,
pl 733 drivers/gpu/drm/radeon/cypress_dpm.c pl->sclk,
pl 734 drivers/gpu/drm/radeon/cypress_dpm.c pl->mclk,
pl 744 drivers/gpu/drm/radeon/cypress_dpm.c pl->vddc,
pl 752 drivers/gpu/drm/radeon/cypress_dpm.c pl->vddci,
pl 758 drivers/gpu/drm/radeon/cypress_dpm.c ret = cypress_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
pl 829 drivers/gpu/drm/radeon/cypress_dpm.c struct rv7xx_pl *pl,
pl 836 drivers/gpu/drm/radeon/cypress_dpm.c if (pl->mclk <=
pl 44 drivers/gpu/drm/radeon/cypress_dpm.h struct rv7xx_pl *pl;
pl 114 drivers/gpu/drm/radeon/cypress_dpm.h struct rv7xx_pl *pl,
pl 2617 drivers/gpu/drm/radeon/kv_dpm.c struct kv_pl *pl = &ps->levels[index];
pl 2622 drivers/gpu/drm/radeon/kv_dpm.c pl->sclk = sclk;
pl 2623 drivers/gpu/drm/radeon/kv_dpm.c pl->vddc_index = clock_info->sumo.vddcIndex;
pl 2628 drivers/gpu/drm/radeon/kv_dpm.c pl->ds_divider_index = 5;
pl 2629 drivers/gpu/drm/radeon/kv_dpm.c pl->ss_divider_index = 5;
pl 2859 drivers/gpu/drm/radeon/kv_dpm.c struct kv_pl *pl = &ps->levels[i];
pl 2861 drivers/gpu/drm/radeon/kv_dpm.c i, pl->sclk,
pl 2862 drivers/gpu/drm/radeon/kv_dpm.c kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
pl 1616 drivers/gpu/drm/radeon/ni_dpm.c struct rv7xx_pl *pl,
pl 1623 drivers/gpu/drm/radeon/ni_dpm.c (u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk);
pl 1626 drivers/gpu/drm/radeon/ni_dpm.c radeon_atom_set_engine_dram_timings(rdev, pl->sclk, pl->mclk);
pl 2310 drivers/gpu/drm/radeon/ni_dpm.c struct rv7xx_pl *pl,
pl 2322 drivers/gpu/drm/radeon/ni_dpm.c ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
pl 2324 drivers/gpu/drm/radeon/ni_dpm.c ret = ni_populate_sclk_value(rdev, pl->sclk, &level->sclk);
pl 2330 drivers/gpu/drm/radeon/ni_dpm.c (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
pl 2337 drivers/gpu/drm/radeon/ni_dpm.c if (pl->mclk > pi->mclk_edc_enable_threshold)
pl 2339 drivers/gpu/drm/radeon/ni_dpm.c if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
pl 2342 drivers/gpu/drm/radeon/ni_dpm.c level->strobeMode = cypress_get_strobe_mode_settings(rdev, pl->mclk);
pl 2345 drivers/gpu/drm/radeon/ni_dpm.c if (cypress_get_mclk_frequency_ratio(rdev, pl->mclk, true) >=
pl 2352 drivers/gpu/drm/radeon/ni_dpm.c if (pl->mclk > ni_pi->mclk_rtt_mode_threshold)
pl 2356 drivers/gpu/drm/radeon/ni_dpm.c ret = ni_populate_mclk_value(rdev, pl->sclk,
pl 2361 drivers/gpu/drm/radeon/ni_dpm.c ret = ni_populate_mclk_value(rdev, pl->sclk, pl->mclk, &level->mclk, 1, 1);
pl 2367 drivers/gpu/drm/radeon/ni_dpm.c pl->vddc, &level->vddc);
pl 2380 drivers/gpu/drm/radeon/ni_dpm.c pl->vddci, &level->vddci);
pl 2385 drivers/gpu/drm/radeon/ni_dpm.c ni_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
pl 2958 drivers/gpu/drm/radeon/ni_dpm.c struct rv7xx_pl *pl,
pl 2965 drivers/gpu/drm/radeon/ni_dpm.c if (pl->mclk <= ni_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
pl 3927 drivers/gpu/drm/radeon/ni_dpm.c struct rv7xx_pl *pl = &ps->performance_levels[index];
pl 3931 drivers/gpu/drm/radeon/ni_dpm.c pl->sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
pl 3932 drivers/gpu/drm/radeon/ni_dpm.c pl->sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
pl 3933 drivers/gpu/drm/radeon/ni_dpm.c pl->mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
pl 3934 drivers/gpu/drm/radeon/ni_dpm.c pl->mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
pl 3936 drivers/gpu/drm/radeon/ni_dpm.c pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
pl 3937 drivers/gpu/drm/radeon/ni_dpm.c pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
pl 3938 drivers/gpu/drm/radeon/ni_dpm.c pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);
pl 3941 drivers/gpu/drm/radeon/ni_dpm.c if (pl->vddc == 0xff01) {
pl 3943 drivers/gpu/drm/radeon/ni_dpm.c pl->vddc = pi->max_vddc;
pl 3947 drivers/gpu/drm/radeon/ni_dpm.c pi->acpi_vddc = pl->vddc;
pl 3948 drivers/gpu/drm/radeon/ni_dpm.c eg_pi->acpi_vddci = pl->vddci;
pl 3957 drivers/gpu/drm/radeon/ni_dpm.c eg_pi->ulv.pl = pl;
pl 3960 drivers/gpu/drm/radeon/ni_dpm.c if (pi->min_vddc_in_table > pl->vddc)
pl 3961 drivers/gpu/drm/radeon/ni_dpm.c pi->min_vddc_in_table = pl->vddc;
pl 3963 drivers/gpu/drm/radeon/ni_dpm.c if (pi->max_vddc_in_table < pl->vddc)
pl 3964 drivers/gpu/drm/radeon/ni_dpm.c pi->max_vddc_in_table = pl->vddc;
pl 3970 drivers/gpu/drm/radeon/ni_dpm.c pl->mclk = rdev->clock.default_mclk;
pl 3971 drivers/gpu/drm/radeon/ni_dpm.c pl->sclk = rdev->clock.default_sclk;
pl 3972 drivers/gpu/drm/radeon/ni_dpm.c pl->vddc = vddc;
pl 3973 drivers/gpu/drm/radeon/ni_dpm.c pl->vddci = vddci;
pl 3978 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
pl 3979 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
pl 3980 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
pl 3981 drivers/gpu/drm/radeon/ni_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
pl 4286 drivers/gpu/drm/radeon/ni_dpm.c struct rv7xx_pl *pl;
pl 4293 drivers/gpu/drm/radeon/ni_dpm.c pl = &ps->performance_levels[i];
pl 4296 drivers/gpu/drm/radeon/ni_dpm.c i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
pl 4299 drivers/gpu/drm/radeon/ni_dpm.c i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
pl 4310 drivers/gpu/drm/radeon/ni_dpm.c struct rv7xx_pl *pl;
pl 4318 drivers/gpu/drm/radeon/ni_dpm.c pl = &ps->performance_levels[current_index];
pl 4321 drivers/gpu/drm/radeon/ni_dpm.c current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
pl 4330 drivers/gpu/drm/radeon/ni_dpm.c struct rv7xx_pl *pl;
pl 4338 drivers/gpu/drm/radeon/ni_dpm.c pl = &ps->performance_levels[current_index];
pl 4339 drivers/gpu/drm/radeon/ni_dpm.c return pl->sclk;
pl 4348 drivers/gpu/drm/radeon/ni_dpm.c struct rv7xx_pl *pl;
pl 4356 drivers/gpu/drm/radeon/ni_dpm.c pl = &ps->performance_levels[current_index];
pl 4357 drivers/gpu/drm/radeon/ni_dpm.c return pl->mclk;
pl 1823 drivers/gpu/drm/radeon/rv6xx_dpm.c struct rv6xx_pl *pl;
pl 1827 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->low;
pl 1830 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->medium;
pl 1834 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->high;
pl 1843 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->mclk = mclk;
pl 1844 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->sclk = sclk;
pl 1845 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
pl 1846 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
pl 1849 drivers/gpu/drm/radeon/rv6xx_dpm.c if (pl->vddc == 0xff01) {
pl 1851 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->vddc = vddc;
pl 1855 drivers/gpu/drm/radeon/rv6xx_dpm.c if (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) {
pl 1857 drivers/gpu/drm/radeon/rv6xx_dpm.c if (pl->vddc < 1100)
pl 1858 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->flags &= ~ATOM_PPLIB_R600_FLAGS_PCIEGEN2;
pl 1866 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->mclk = rdev->clock.default_mclk;
pl 1867 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->sclk = rdev->clock.default_sclk;
pl 1868 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->vddc = vddc;
pl 2011 drivers/gpu/drm/radeon/rv6xx_dpm.c struct rv6xx_pl *pl;
pl 2016 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->low;
pl 2018 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->sclk, pl->mclk, pl->vddc);
pl 2019 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->medium;
pl 2021 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->sclk, pl->mclk, pl->vddc);
pl 2022 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->high;
pl 2024 drivers/gpu/drm/radeon/rv6xx_dpm.c pl->sclk, pl->mclk, pl->vddc);
pl 2033 drivers/gpu/drm/radeon/rv6xx_dpm.c struct rv6xx_pl *pl;
pl 2042 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->low;
pl 2044 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->medium;
pl 2046 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->high;
pl 2049 drivers/gpu/drm/radeon/rv6xx_dpm.c current_index, pl->sclk, pl->mclk, pl->vddc);
pl 2058 drivers/gpu/drm/radeon/rv6xx_dpm.c struct rv6xx_pl *pl;
pl 2067 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->low;
pl 2069 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->medium;
pl 2071 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->high;
pl 2072 drivers/gpu/drm/radeon/rv6xx_dpm.c return pl->sclk;
pl 2081 drivers/gpu/drm/radeon/rv6xx_dpm.c struct rv6xx_pl *pl;
pl 2090 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->low;
pl 2092 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->medium;
pl 2094 drivers/gpu/drm/radeon/rv6xx_dpm.c pl = &ps->high;
pl 2095 drivers/gpu/drm/radeon/rv6xx_dpm.c return pl->mclk;
pl 227 drivers/gpu/drm/radeon/rv770_dpm.c struct rv7xx_pl *pl)
pl 229 drivers/gpu/drm/radeon/rv770_dpm.c return (pl->flags & ATOM_PPLIB_R600_FLAGS_LOWPOWER) ?
pl 614 drivers/gpu/drm/radeon/rv770_dpm.c struct rv7xx_pl *pl,
pl 622 drivers/gpu/drm/radeon/rv770_dpm.c ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
pl 623 drivers/gpu/drm/radeon/rv770_dpm.c level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
pl 624 drivers/gpu/drm/radeon/rv770_dpm.c level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
pl 628 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv740_populate_sclk_value(rdev, pl->sclk,
pl 631 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv730_populate_sclk_value(rdev, pl->sclk,
pl 634 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv770_populate_sclk_value(rdev, pl->sclk,
pl 641 drivers/gpu/drm/radeon/rv770_dpm.c if (pl->mclk <= pi->mclk_strobe_mode_threshold)
pl 643 drivers/gpu/drm/radeon/rv770_dpm.c rv740_get_mclk_frequency_ratio(pl->mclk) | 0x10;
pl 647 drivers/gpu/drm/radeon/rv770_dpm.c if (pl->mclk > pi->mclk_edc_enable_threshold)
pl 652 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv740_populate_mclk_value(rdev, pl->sclk,
pl 653 drivers/gpu/drm/radeon/rv770_dpm.c pl->mclk, &level->mclk);
pl 655 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv730_populate_mclk_value(rdev, pl->sclk,
pl 656 drivers/gpu/drm/radeon/rv770_dpm.c pl->mclk, &level->mclk);
pl 658 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv770_populate_mclk_value(rdev, pl->sclk,
pl 659 drivers/gpu/drm/radeon/rv770_dpm.c pl->mclk, &level->mclk);
pl 663 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv770_populate_vddc_value(rdev, pl->vddc,
pl 668 drivers/gpu/drm/radeon/rv770_dpm.c ret = rv770_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
pl 2181 drivers/gpu/drm/radeon/rv770_dpm.c struct rv7xx_pl *pl;
pl 2185 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->low;
pl 2188 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->medium;
pl 2192 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->high;
pl 2202 drivers/gpu/drm/radeon/rv770_dpm.c pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
pl 2203 drivers/gpu/drm/radeon/rv770_dpm.c pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
pl 2204 drivers/gpu/drm/radeon/rv770_dpm.c pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);
pl 2211 drivers/gpu/drm/radeon/rv770_dpm.c pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
pl 2212 drivers/gpu/drm/radeon/rv770_dpm.c pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
pl 2215 drivers/gpu/drm/radeon/rv770_dpm.c pl->mclk = mclk;
pl 2216 drivers/gpu/drm/radeon/rv770_dpm.c pl->sclk = sclk;
pl 2219 drivers/gpu/drm/radeon/rv770_dpm.c if (pl->vddc == 0xff01) {
pl 2221 drivers/gpu/drm/radeon/rv770_dpm.c pl->vddc = pi->max_vddc;
pl 2225 drivers/gpu/drm/radeon/rv770_dpm.c pi->acpi_vddc = pl->vddc;
pl 2227 drivers/gpu/drm/radeon/rv770_dpm.c eg_pi->acpi_vddci = pl->vddci;
pl 2237 drivers/gpu/drm/radeon/rv770_dpm.c eg_pi->ulv.pl = pl;
pl 2241 drivers/gpu/drm/radeon/rv770_dpm.c if (pi->min_vddc_in_table > pl->vddc)
pl 2242 drivers/gpu/drm/radeon/rv770_dpm.c pi->min_vddc_in_table = pl->vddc;
pl 2244 drivers/gpu/drm/radeon/rv770_dpm.c if (pi->max_vddc_in_table < pl->vddc)
pl 2245 drivers/gpu/drm/radeon/rv770_dpm.c pi->max_vddc_in_table = pl->vddc;
pl 2251 drivers/gpu/drm/radeon/rv770_dpm.c pl->mclk = rdev->clock.default_mclk;
pl 2252 drivers/gpu/drm/radeon/rv770_dpm.c pl->sclk = rdev->clock.default_sclk;
pl 2253 drivers/gpu/drm/radeon/rv770_dpm.c pl->vddc = vddc;
pl 2254 drivers/gpu/drm/radeon/rv770_dpm.c pl->vddci = vddci;
pl 2259 drivers/gpu/drm/radeon/rv770_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
pl 2260 drivers/gpu/drm/radeon/rv770_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
pl 2261 drivers/gpu/drm/radeon/rv770_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
pl 2262 drivers/gpu/drm/radeon/rv770_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
pl 2436 drivers/gpu/drm/radeon/rv770_dpm.c struct rv7xx_pl *pl;
pl 2442 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->low;
pl 2444 drivers/gpu/drm/radeon/rv770_dpm.c pl->sclk, pl->mclk, pl->vddc, pl->vddci);
pl 2445 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->medium;
pl 2447 drivers/gpu/drm/radeon/rv770_dpm.c pl->sclk, pl->mclk, pl->vddc, pl->vddci);
pl 2448 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->high;
pl 2450 drivers/gpu/drm/radeon/rv770_dpm.c pl->sclk, pl->mclk, pl->vddc, pl->vddci);
pl 2452 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->low;
pl 2454 drivers/gpu/drm/radeon/rv770_dpm.c pl->sclk, pl->mclk, pl->vddc);
pl 2455 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->medium;
pl 2457 drivers/gpu/drm/radeon/rv770_dpm.c pl->sclk, pl->mclk, pl->vddc);
pl 2458 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->high;
pl 2460 drivers/gpu/drm/radeon/rv770_dpm.c pl->sclk, pl->mclk, pl->vddc);
pl 2470 drivers/gpu/drm/radeon/rv770_dpm.c struct rv7xx_pl *pl;
pl 2479 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->low;
pl 2481 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->medium;
pl 2483 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->high;
pl 2487 drivers/gpu/drm/radeon/rv770_dpm.c current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
pl 2490 drivers/gpu/drm/radeon/rv770_dpm.c current_index, pl->sclk, pl->mclk, pl->vddc);
pl 2499 drivers/gpu/drm/radeon/rv770_dpm.c struct rv7xx_pl *pl;
pl 2508 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->low;
pl 2510 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->medium;
pl 2512 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->high;
pl 2513 drivers/gpu/drm/radeon/rv770_dpm.c return pl->sclk;
pl 2521 drivers/gpu/drm/radeon/rv770_dpm.c struct rv7xx_pl *pl;
pl 2530 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->low;
pl 2532 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->medium;
pl 2534 drivers/gpu/drm/radeon/rv770_dpm.c pl = &ps->high;
pl 2535 drivers/gpu/drm/radeon/rv770_dpm.c return pl->mclk;
pl 223 drivers/gpu/drm/radeon/rv770_dpm.h struct rv7xx_pl *pl);
pl 1756 drivers/gpu/drm/radeon/si_dpm.c struct rv7xx_pl *pl,
pl 4294 drivers/gpu/drm/radeon/si_dpm.c struct rv7xx_pl *pl,
pl 4302 drivers/gpu/drm/radeon/si_dpm.c (u8)si_calculate_memory_refresh_rate(rdev, pl->sclk);
pl 4305 drivers/gpu/drm/radeon/si_dpm.c pl->sclk,
pl 4306 drivers/gpu/drm/radeon/si_dpm.c pl->mclk);
pl 4639 drivers/gpu/drm/radeon/si_dpm.c ret = si_convert_power_level_to_smc(rdev, &ulv->pl,
pl 4668 drivers/gpu/drm/radeon/si_dpm.c ret = si_populate_memory_timing_parameters(rdev, &ulv->pl,
pl 4759 drivers/gpu/drm/radeon/si_dpm.c if (ulv->supported && ulv->pl.vddc) {
pl 4978 drivers/gpu/drm/radeon/si_dpm.c struct rv7xx_pl *pl,
pl 4993 drivers/gpu/drm/radeon/si_dpm.c level->gen2PCIE = (u8)pl->pcie_gen;
pl 4995 drivers/gpu/drm/radeon/si_dpm.c ret = si_populate_sclk_value(rdev, pl->sclk, &level->sclk);
pl 5002 drivers/gpu/drm/radeon/si_dpm.c (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
pl 5013 drivers/gpu/drm/radeon/si_dpm.c if (pl->mclk > pi->mclk_edc_enable_threshold)
pl 5016 drivers/gpu/drm/radeon/si_dpm.c if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
pl 5019 drivers/gpu/drm/radeon/si_dpm.c level->strobeMode = si_get_strobe_mode_settings(rdev, pl->mclk);
pl 5022 drivers/gpu/drm/radeon/si_dpm.c if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
pl 5032 drivers/gpu/drm/radeon/si_dpm.c pl->mclk);
pl 5038 drivers/gpu/drm/radeon/si_dpm.c pl->sclk,
pl 5039 drivers/gpu/drm/radeon/si_dpm.c pl->mclk,
pl 5047 drivers/gpu/drm/radeon/si_dpm.c pl->vddc, &level->vddc);
pl 5063 drivers/gpu/drm/radeon/si_dpm.c pl->vddci, &level->vddci);
pl 5071 drivers/gpu/drm/radeon/si_dpm.c pl->vddc,
pl 5072 drivers/gpu/drm/radeon/si_dpm.c pl->sclk,
pl 5073 drivers/gpu/drm/radeon/si_dpm.c pl->mclk,
pl 5081 drivers/gpu/drm/radeon/si_dpm.c ret = si_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
pl 5155 drivers/gpu/drm/radeon/si_dpm.c if (state->performance_levels[0].mclk != ulv->pl.mclk)
pl 5163 drivers/gpu/drm/radeon/si_dpm.c if (ulv->pl.vddc <
pl 5300 drivers/gpu/drm/radeon/si_dpm.c if (ulv->supported && ulv->pl.vddc) {
pl 5618 drivers/gpu/drm/radeon/si_dpm.c struct rv7xx_pl *pl,
pl 5625 drivers/gpu/drm/radeon/si_dpm.c if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
pl 5673 drivers/gpu/drm/radeon/si_dpm.c if (ulv->supported && ulv->pl.vddc != 0)
pl 5674 drivers/gpu/drm/radeon/si_dpm.c si_convert_mc_reg_table_entry_to_smc(rdev, &ulv->pl,
pl 6741 drivers/gpu/drm/radeon/si_dpm.c struct rv7xx_pl *pl = &ps->performance_levels[index];
pl 6746 drivers/gpu/drm/radeon/si_dpm.c pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
pl 6747 drivers/gpu/drm/radeon/si_dpm.c pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
pl 6748 drivers/gpu/drm/radeon/si_dpm.c pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
pl 6749 drivers/gpu/drm/radeon/si_dpm.c pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
pl 6751 drivers/gpu/drm/radeon/si_dpm.c pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
pl 6752 drivers/gpu/drm/radeon/si_dpm.c pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
pl 6753 drivers/gpu/drm/radeon/si_dpm.c pl->flags = le32_to_cpu(clock_info->si.ulFlags);
pl 6754 drivers/gpu/drm/radeon/si_dpm.c pl->pcie_gen = r600_get_pcie_gen_support(rdev,
pl 6760 drivers/gpu/drm/radeon/si_dpm.c ret = si_get_leakage_voltage_from_leakage_index(rdev, pl->vddc,
pl 6763 drivers/gpu/drm/radeon/si_dpm.c pl->vddc = leakage_voltage;
pl 6766 drivers/gpu/drm/radeon/si_dpm.c pi->acpi_vddc = pl->vddc;
pl 6767 drivers/gpu/drm/radeon/si_dpm.c eg_pi->acpi_vddci = pl->vddci;
pl 6768 drivers/gpu/drm/radeon/si_dpm.c si_pi->acpi_pcie_gen = pl->pcie_gen;
pl 6775 drivers/gpu/drm/radeon/si_dpm.c si_pi->ulv.pl = *pl;
pl 6782 drivers/gpu/drm/radeon/si_dpm.c if (pi->min_vddc_in_table > pl->vddc)
pl 6783 drivers/gpu/drm/radeon/si_dpm.c pi->min_vddc_in_table = pl->vddc;
pl 6785 drivers/gpu/drm/radeon/si_dpm.c if (pi->max_vddc_in_table < pl->vddc)
pl 6786 drivers/gpu/drm/radeon/si_dpm.c pi->max_vddc_in_table = pl->vddc;
pl 6792 drivers/gpu/drm/radeon/si_dpm.c pl->mclk = rdev->clock.default_mclk;
pl 6793 drivers/gpu/drm/radeon/si_dpm.c pl->sclk = rdev->clock.default_sclk;
pl 6794 drivers/gpu/drm/radeon/si_dpm.c pl->vddc = vddc;
pl 6795 drivers/gpu/drm/radeon/si_dpm.c pl->vddci = vddci;
pl 6801 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
pl 6802 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
pl 6803 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
pl 6804 drivers/gpu/drm/radeon/si_dpm.c rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
pl 7098 drivers/gpu/drm/radeon/si_dpm.c struct rv7xx_pl *pl;
pl 7106 drivers/gpu/drm/radeon/si_dpm.c pl = &ps->performance_levels[current_index];
pl 7109 drivers/gpu/drm/radeon/si_dpm.c current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
pl 7118 drivers/gpu/drm/radeon/si_dpm.c struct rv7xx_pl *pl;
pl 7126 drivers/gpu/drm/radeon/si_dpm.c pl = &ps->performance_levels[current_index];
pl 7127 drivers/gpu/drm/radeon/si_dpm.c return pl->sclk;
pl 7136 drivers/gpu/drm/radeon/si_dpm.c struct rv7xx_pl *pl;
pl 7144 drivers/gpu/drm/radeon/si_dpm.c pl = &ps->performance_levels[current_index];
pl 7145 drivers/gpu/drm/radeon/si_dpm.c return pl->mclk;
pl 147 drivers/gpu/drm/radeon/si_dpm.h struct rv7xx_pl pl;
pl 547 drivers/gpu/drm/radeon/sumo_dpm.c struct sumo_pl *pl, u32 index)
pl 555 drivers/gpu/drm/radeon/sumo_dpm.c pl->sclk, false, &dividers);
pl 561 drivers/gpu/drm/radeon/sumo_dpm.c sumo_set_vid(rdev, index, pl->vddc_index);
pl 563 drivers/gpu/drm/radeon/sumo_dpm.c if (pl->ss_divider_index == 0 || pl->ds_divider_index == 0) {
pl 567 drivers/gpu/drm/radeon/sumo_dpm.c sumo_set_ss_dividers(rdev, index, pl->ss_divider_index);
pl 568 drivers/gpu/drm/radeon/sumo_dpm.c sumo_set_ds_dividers(rdev, index, pl->ds_divider_index);
pl 574 drivers/gpu/drm/radeon/sumo_dpm.c sumo_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);
pl 577 drivers/gpu/drm/radeon/sumo_dpm.c sumo_set_tdp_limit(rdev, index, pl->sclk_dpm_tdp_limit);
pl 1435 drivers/gpu/drm/radeon/sumo_dpm.c struct sumo_pl *pl = &ps->levels[index];
pl 1440 drivers/gpu/drm/radeon/sumo_dpm.c pl->sclk = sclk;
pl 1441 drivers/gpu/drm/radeon/sumo_dpm.c pl->vddc_index = clock_info->sumo.vddcIndex;
pl 1442 drivers/gpu/drm/radeon/sumo_dpm.c pl->sclk_dpm_tdp_limit = clock_info->sumo.tdpLimit;
pl 1447 drivers/gpu/drm/radeon/sumo_dpm.c pl->ds_divider_index = 5;
pl 1448 drivers/gpu/drm/radeon/sumo_dpm.c pl->ss_divider_index = 4;
pl 1804 drivers/gpu/drm/radeon/sumo_dpm.c struct sumo_pl *pl = &ps->levels[i];
pl 1806 drivers/gpu/drm/radeon/sumo_dpm.c i, pl->sclk,
pl 1807 drivers/gpu/drm/radeon/sumo_dpm.c sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
pl 1818 drivers/gpu/drm/radeon/sumo_dpm.c struct sumo_pl *pl;
pl 1824 drivers/gpu/drm/radeon/sumo_dpm.c pl = &pi->boost_pl;
pl 1827 drivers/gpu/drm/radeon/sumo_dpm.c current_index, pl->sclk,
pl 1828 drivers/gpu/drm/radeon/sumo_dpm.c sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
pl 1832 drivers/gpu/drm/radeon/sumo_dpm.c pl = &ps->levels[current_index];
pl 1835 drivers/gpu/drm/radeon/sumo_dpm.c current_index, pl->sclk,
pl 1836 drivers/gpu/drm/radeon/sumo_dpm.c sumo_convert_voltage_index_to_value(rdev, pl->vddc_index));
pl 1845 drivers/gpu/drm/radeon/sumo_dpm.c struct sumo_pl *pl;
pl 1851 drivers/gpu/drm/radeon/sumo_dpm.c pl = &pi->boost_pl;
pl 1852 drivers/gpu/drm/radeon/sumo_dpm.c return pl->sclk;
pl 1856 drivers/gpu/drm/radeon/sumo_dpm.c pl = &ps->levels[current_index];
pl 1857 drivers/gpu/drm/radeon/sumo_dpm.c return pl->sclk;
pl 716 drivers/gpu/drm/radeon/trinity_dpm.c struct trinity_pl *pl, u32 index)
pl 723 drivers/gpu/drm/radeon/trinity_dpm.c trinity_set_divider_value(rdev, index, pl->sclk);
pl 724 drivers/gpu/drm/radeon/trinity_dpm.c trinity_set_vid(rdev, index, pl->vddc_index);
pl 725 drivers/gpu/drm/radeon/trinity_dpm.c trinity_set_ss_dividers(rdev, index, pl->ss_divider_index);
pl 726 drivers/gpu/drm/radeon/trinity_dpm.c trinity_set_ds_dividers(rdev, index, pl->ds_divider_index);
pl 727 drivers/gpu/drm/radeon/trinity_dpm.c trinity_set_allos_gnb_slow(rdev, index, pl->allow_gnb_slow);
pl 728 drivers/gpu/drm/radeon/trinity_dpm.c trinity_set_force_nbp_state(rdev, index, pl->force_nbp_state);
pl 729 drivers/gpu/drm/radeon/trinity_dpm.c trinity_set_display_wm(rdev, index, pl->display_wm);
pl 730 drivers/gpu/drm/radeon/trinity_dpm.c trinity_set_vce_wm(rdev, index, pl->vce_wm);
pl 1714 drivers/gpu/drm/radeon/trinity_dpm.c struct trinity_pl *pl = &ps->levels[index];
pl 1719 drivers/gpu/drm/radeon/trinity_dpm.c pl->sclk = sclk;
pl 1720 drivers/gpu/drm/radeon/trinity_dpm.c pl->vddc_index = clock_info->sumo.vddcIndex;
pl 1725 drivers/gpu/drm/radeon/trinity_dpm.c pl->ds_divider_index = 5;
pl 1726 drivers/gpu/drm/radeon/trinity_dpm.c pl->ss_divider_index = 5;
pl 2022 drivers/gpu/drm/radeon/trinity_dpm.c struct trinity_pl *pl = &ps->levels[i];
pl 2024 drivers/gpu/drm/radeon/trinity_dpm.c i, pl->sclk,
pl 2025 drivers/gpu/drm/radeon/trinity_dpm.c trinity_convert_voltage_index_to_value(rdev, pl->vddc_index));
pl 2036 drivers/gpu/drm/radeon/trinity_dpm.c struct trinity_pl *pl;
pl 2044 drivers/gpu/drm/radeon/trinity_dpm.c pl = &ps->levels[current_index];
pl 2047 drivers/gpu/drm/radeon/trinity_dpm.c current_index, pl->sclk,
pl 2048 drivers/gpu/drm/radeon/trinity_dpm.c trinity_convert_voltage_index_to_value(rdev, pl->vddc_index));
pl 2057 drivers/gpu/drm/radeon/trinity_dpm.c struct trinity_pl *pl;
pl 2065 drivers/gpu/drm/radeon/trinity_dpm.c pl = &ps->levels[current_index];
pl 2066 drivers/gpu/drm/radeon/trinity_dpm.c return pl->sclk;
pl 338 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c struct ttm_place pl;
pl 355 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c pl.fpfn = 0;
pl 356 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c pl.lpfn = 0;
pl 357 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
pl 360 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c pl.flags |= TTM_PL_FLAG_NO_EVICT;
pl 364 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c placement.placement = &pl;
pl 23 drivers/hwtracing/stm/dummy_stm.c u64 pl = 0;
pl 26 drivers/hwtracing/stm/dummy_stm.c pl = *(u64 *)payload;
pl 29 drivers/hwtracing/stm/dummy_stm.c pl &= (1ull << (size * 8)) - 1;
pl 31 drivers/hwtracing/stm/dummy_stm.c packet, size, pl);
pl 704 drivers/lightnvm/core.c ppa.g.pl = pl_idx;
pl 869 drivers/lightnvm/core.c int ret, pg, pl;
pl 898 drivers/lightnvm/core.c ppa.g.pl = geo->num_pln - 1;
pl 927 drivers/lightnvm/core.c for (pl = 0; pl < geo->num_pln; pl++) {
pl 929 drivers/lightnvm/core.c ppa.g.pl = pl;
pl 966 drivers/lightnvm/core.c int ret, blk, pl, offset, blktype;
pl 972 drivers/lightnvm/core.c for (pl = 0; pl < geo->pln_mode; pl++) {
pl 973 drivers/lightnvm/core.c if (blks[offset + pl] &
pl 975 drivers/lightnvm/core.c blktype = blks[offset + pl];
pl 991 drivers/lightnvm/pblk.h ppa.g.pl = (paddr & ppaf->pln_mask) >> ppaf->pln_offset;
pl 1048 drivers/lightnvm/pblk.h paddr |= (u64)p.g.pl << ppaf->pln_offset;
pl 1204 drivers/lightnvm/pblk.h p->g.pg, p->g.pl, p->g.sec);
pl 1244 drivers/lightnvm/pblk.h ppa->g.pl < geo->num_pln &&
pl 654 drivers/md/dm-integrity.c static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
pl 665 drivers/md/dm-integrity.c va = lowmem_page_address(pl[pl_index].page);
pl 956 drivers/md/dm-integrity.c io_req.mem.ptr.pl = &ic->journal_io[pl_index];
pl 958 drivers/md/dm-integrity.c io_req.mem.ptr.pl = &ic->journal[pl_index];
pl 1076 drivers/md/dm-integrity.c io_req.mem.ptr.pl = &ic->journal[pl_index];
pl 3145 drivers/md/dm-integrity.c static void dm_integrity_free_page_list(struct page_list *pl)
pl 3149 drivers/md/dm-integrity.c if (!pl)
pl 3151 drivers/md/dm-integrity.c for (i = 0; pl[i].page; i++)
pl 3152 drivers/md/dm-integrity.c __free_page(pl[i].page);
pl 3153 drivers/md/dm-integrity.c kvfree(pl);
pl 3158 drivers/md/dm-integrity.c struct page_list *pl;
pl 3161 drivers/md/dm-integrity.c pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
pl 3162 drivers/md/dm-integrity.c if (!pl)
pl 3166 drivers/md/dm-integrity.c pl[i].page = alloc_page(GFP_KERNEL);
pl 3167 drivers/md/dm-integrity.c if (!pl[i].page) {
pl 3168 drivers/md/dm-integrity.c dm_integrity_free_page_list(pl);
pl 3172 drivers/md/dm-integrity.c pl[i - 1].next = &pl[i];
pl 3174 drivers/md/dm-integrity.c pl[i].page = NULL;
pl 3175 drivers/md/dm-integrity.c pl[i].next = NULL;
pl 3177 drivers/md/dm-integrity.c return pl;
pl 3189 drivers/md/dm-integrity.c struct page_list *pl)
pl 3222 drivers/md/dm-integrity.c char *va = lowmem_page_address(pl[idx].page);
pl 183 drivers/md/dm-io.c struct page_list *pl = (struct page_list *) dp->context_ptr;
pl 185 drivers/md/dm-io.c *p = pl->page;
pl 192 drivers/md/dm-io.c struct page_list *pl = (struct page_list *) dp->context_ptr;
pl 193 drivers/md/dm-io.c dp->context_ptr = pl->next;
pl 197 drivers/md/dm-io.c static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
pl 202 drivers/md/dm-io.c dp->context_ptr = pl;
pl 502 drivers/md/dm-io.c list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
pl 216 drivers/md/dm-kcopyd.c struct page_list *pl;
pl 218 drivers/md/dm-kcopyd.c pl = kmalloc(sizeof(*pl), gfp);
pl 219 drivers/md/dm-kcopyd.c if (!pl)
pl 222 drivers/md/dm-kcopyd.c pl->page = alloc_page(gfp);
pl 223 drivers/md/dm-kcopyd.c if (!pl->page) {
pl 224 drivers/md/dm-kcopyd.c kfree(pl);
pl 228 drivers/md/dm-kcopyd.c return pl;
pl 231 drivers/md/dm-kcopyd.c static void free_pl(struct page_list *pl)
pl 233 drivers/md/dm-kcopyd.c __free_page(pl->page);
pl 234 drivers/md/dm-kcopyd.c kfree(pl);
pl 241 drivers/md/dm-kcopyd.c static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
pl 246 drivers/md/dm-kcopyd.c next = pl->next;
pl 249 drivers/md/dm-kcopyd.c free_pl(pl);
pl 251 drivers/md/dm-kcopyd.c pl->next = kc->pages;
pl 252 drivers/md/dm-kcopyd.c kc->pages = pl;
pl 256 drivers/md/dm-kcopyd.c pl = next;
pl 257 drivers/md/dm-kcopyd.c } while (pl);
pl 263 drivers/md/dm-kcopyd.c struct page_list *pl;
pl 268 drivers/md/dm-kcopyd.c pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
pl 269 drivers/md/dm-kcopyd.c if (unlikely(!pl)) {
pl 271 drivers/md/dm-kcopyd.c pl = kc->pages;
pl 272 drivers/md/dm-kcopyd.c if (unlikely(!pl))
pl 274 drivers/md/dm-kcopyd.c kc->pages = pl->next;
pl 277 drivers/md/dm-kcopyd.c pl->next = *pages;
pl 278 drivers/md/dm-kcopyd.c *pages = pl;
pl 292 drivers/md/dm-kcopyd.c static void drop_pages(struct page_list *pl)
pl 296 drivers/md/dm-kcopyd.c while (pl) {
pl 297 drivers/md/dm-kcopyd.c next = pl->next;
pl 298 drivers/md/dm-kcopyd.c free_pl(pl);
pl 299 drivers/md/dm-kcopyd.c pl = next;
pl 309 drivers/md/dm-kcopyd.c struct page_list *pl = NULL, *next;
pl 314 drivers/md/dm-kcopyd.c if (pl)
pl 315 drivers/md/dm-kcopyd.c drop_pages(pl);
pl 318 drivers/md/dm-kcopyd.c next->next = pl;
pl 319 drivers/md/dm-kcopyd.c pl = next;
pl 323 drivers/md/dm-kcopyd.c kcopyd_put_pages(kc, pl);
pl 557 drivers/md/dm-kcopyd.c .mem.ptr.pl = job->pages,
pl 579 drivers/media/platform/exynos4-is/fimc-capture.c const struct fimc_pix_limit *pl = var->pix_limit;
pl 607 drivers/media/platform/exynos4-is/fimc-capture.c pl->scaler_dis_w : pl->scaler_en_w;
pl 624 drivers/media/platform/exynos4-is/fimc-capture.c max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
pl 656 drivers/media/platform/exynos4-is/fimc-capture.c const struct fimc_pix_limit *pl = var->pix_limit;
pl 695 drivers/media/platform/exynos4-is/fimc-capture.c rotate ? pl->out_rot_en_w : pl->out_rot_dis_w,
pl 70 drivers/media/platform/qcom/venus/core.h struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
pl 610 drivers/media/platform/qcom/venus/hfi_cmds.c struct hfi_profile_level *in = pdata, *pl = prop_data;
pl 612 drivers/media/platform/qcom/venus/hfi_cmds.c pl->level = in->level;
pl 613 drivers/media/platform/qcom/venus/hfi_cmds.c pl->profile = in->profile;
pl 614 drivers/media/platform/qcom/venus/hfi_cmds.c if (pl->profile <= 0)
pl 616 drivers/media/platform/qcom/venus/hfi_cmds.c pl->profile = HFI_H264_PROFILE_HIGH;
pl 618 drivers/media/platform/qcom/venus/hfi_cmds.c if (!pl->level)
pl 620 drivers/media/platform/qcom/venus/hfi_cmds.c pl->level = 1;
pl 622 drivers/media/platform/qcom/venus/hfi_cmds.c pkt->shdr.hdr.size += sizeof(u32) + sizeof(*pl);
pl 87 drivers/media/platform/qcom/venus/hfi_parser.c const struct hfi_profile_level *pl = data;
pl 89 drivers/media/platform/qcom/venus/hfi_parser.c memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
pl 96 drivers/media/platform/qcom/venus/hfi_parser.c struct hfi_profile_level_supported *pl = data;
pl 97 drivers/media/platform/qcom/venus/hfi_parser.c struct hfi_profile_level *proflevel = pl->profile_level;
pl 100 drivers/media/platform/qcom/venus/hfi_parser.c if (pl->profile_count > HFI_MAX_PROFILE_COUNT)
pl 103 drivers/media/platform/qcom/venus/hfi_parser.c memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel));
pl 106 drivers/media/platform/qcom/venus/hfi_parser.c fill_profile_level, pl_arr, pl->profile_count);
pl 645 drivers/media/platform/qcom/venus/venc.c struct hfi_profile_level pl;
pl 812 drivers/media/platform/qcom/venus/venc.c pl.profile = profile;
pl 813 drivers/media/platform/qcom/venus/venc.c pl.level = level;
pl 815 drivers/media/platform/qcom/venus/venc.c ret = hfi_session_set_property(inst, ptype, &pl);
pl 392 drivers/misc/cxl/file.c struct cxl_event_afu_driver_reserved *pl)
pl 395 drivers/misc/cxl/file.c if (!pl) {
pl 396 drivers/misc/cxl/file.c ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
pl 401 drivers/misc/cxl/file.c event->header.size += pl->data_size;
pl 403 drivers/misc/cxl/file.c ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
pl 409 drivers/misc/cxl/file.c ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
pl 415 drivers/misc/cxl/file.c if (copy_to_user(buf, &pl->data, pl->data_size)) {
pl 416 drivers/misc/cxl/file.c ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
pl 420 drivers/misc/cxl/file.c ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
pl 428 drivers/misc/cxl/file.c struct cxl_event_afu_driver_reserved *pl = NULL;
pl 476 drivers/misc/cxl/file.c pl = ctx->afu_driver_ops->fetch_event(ctx);
pl 510 drivers/misc/cxl/file.c return afu_driver_event_copy(ctx, buf, &event, pl);
pl 88 drivers/net/ethernet/brocade/bna/bfi.h u32 pl[BFI_MBMSG_SZ];
pl 76 drivers/net/phy/phylink.c #define phylink_printk(level, pl, fmt, ...) \
pl 78 drivers/net/phy/phylink.c if ((pl)->config->type == PHYLINK_NETDEV) \
pl 79 drivers/net/phy/phylink.c netdev_printk(level, (pl)->netdev, fmt, ##__VA_ARGS__); \
pl 80 drivers/net/phy/phylink.c else if ((pl)->config->type == PHYLINK_DEV) \
pl 81 drivers/net/phy/phylink.c dev_printk(level, (pl)->dev, fmt, ##__VA_ARGS__); \
pl 84 drivers/net/phy/phylink.c #define phylink_err(pl, fmt, ...) \
pl 85 drivers/net/phy/phylink.c phylink_printk(KERN_ERR, pl, fmt, ##__VA_ARGS__)
pl 86 drivers/net/phy/phylink.c #define phylink_warn(pl, fmt, ...) \
pl 87 drivers/net/phy/phylink.c phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
pl 88 drivers/net/phy/phylink.c #define phylink_info(pl, fmt, ...) \
pl 89 drivers/net/phy/phylink.c phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
pl 91 drivers/net/phy/phylink.c #define phylink_dbg(pl, fmt, ...) \
pl 93 drivers/net/phy/phylink.c if ((pl)->config->type == PHYLINK_NETDEV) \
pl 94 drivers/net/phy/phylink.c netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__); \
pl 95 drivers/net/phy/phylink.c else if ((pl)->config->type == PHYLINK_DEV) \
pl 96 drivers/net/phy/phylink.c dev_dbg((pl)->dev, fmt, ##__VA_ARGS__); \
pl 99 drivers/net/phy/phylink.c #define phylink_dbg(pl, fmt, ...) \
pl 100 drivers/net/phy/phylink.c phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
pl 102 drivers/net/phy/phylink.c #define phylink_dbg(pl, fmt, ...) \
pl 105 drivers/net/phy/phylink.c phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__); \
pl 152 drivers/net/phy/phylink.c static int phylink_validate(struct phylink *pl, unsigned long *supported,
pl 155 drivers/net/phy/phylink.c pl->ops->validate(pl->config, supported, state);
pl 160 drivers/net/phy/phylink.c static int phylink_parse_fixedlink(struct phylink *pl,
pl 173 drivers/net/phy/phylink.c pl->link_config.speed = speed;
pl 174 drivers/net/phy/phylink.c pl->link_config.duplex = DUPLEX_HALF;
pl 177 drivers/net/phy/phylink.c pl->link_config.duplex = DUPLEX_FULL;
pl 182 drivers/net/phy/phylink.c pl->link_config.pause |= MLO_PAUSE_SYM;
pl 184 drivers/net/phy/phylink.c pl->link_config.pause |= MLO_PAUSE_ASYM;
pl 191 drivers/net/phy/phylink.c pl->link_gpio = desc;
pl 205 drivers/net/phy/phylink.c phylink_err(pl, "broken fixed-link?\n");
pl 212 drivers/net/phy/phylink.c pl->link_config.duplex = prop[1] ?
pl 214 drivers/net/phy/phylink.c pl->link_config.speed = prop[2];
pl 216 drivers/net/phy/phylink.c pl->link_config.pause |= MLO_PAUSE_SYM;
pl 218 drivers/net/phy/phylink.c pl->link_config.pause |= MLO_PAUSE_ASYM;
pl 222 drivers/net/phy/phylink.c if (pl->link_config.speed > SPEED_1000 &&
pl 223 drivers/net/phy/phylink.c pl->link_config.duplex != DUPLEX_FULL)
pl 224 drivers/net/phy/phylink.c phylink_warn(pl, "fixed link specifies half duplex for %dMbps link?\n",
pl 225 drivers/net/phy/phylink.c pl->link_config.speed);
pl 227 drivers/net/phy/phylink.c bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
pl 228 drivers/net/phy/phylink.c linkmode_copy(pl->link_config.advertising, pl->supported);
pl 229 drivers/net/phy/phylink.c phylink_validate(pl, pl->supported, &pl->link_config);
pl 231 drivers/net/phy/phylink.c s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
pl 232 drivers/net/phy/phylink.c pl->supported, true);
pl 233 drivers/net/phy/phylink.c linkmode_zero(pl->supported);
pl 234 drivers/net/phy/phylink.c phylink_set(pl->supported, MII);
pl 235 drivers/net/phy/phylink.c phylink_set(pl->supported, Pause);
pl 236 drivers/net/phy/phylink.c phylink_set(pl->supported, Asym_Pause);
pl 238 drivers/net/phy/phylink.c __set_bit(s->bit, pl->supported);
pl 240 drivers/net/phy/phylink.c phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n",
pl 241 drivers/net/phy/phylink.c pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
"full" : "half", pl 242 drivers/net/phy/phylink.c pl->link_config.speed); pl 245 drivers/net/phy/phylink.c linkmode_and(pl->link_config.advertising, pl->link_config.advertising, pl 246 drivers/net/phy/phylink.c pl->supported); pl 248 drivers/net/phy/phylink.c pl->link_config.link = 1; pl 249 drivers/net/phy/phylink.c pl->link_config.an_complete = 1; pl 254 drivers/net/phy/phylink.c static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode) pl 261 drivers/net/phy/phylink.c pl->link_an_mode = MLO_AN_FIXED; pl 266 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_FIXED) { pl 267 drivers/net/phy/phylink.c phylink_err(pl, pl 272 drivers/net/phy/phylink.c linkmode_zero(pl->supported); pl 273 drivers/net/phy/phylink.c phylink_set(pl->supported, MII); pl 274 drivers/net/phy/phylink.c phylink_set(pl->supported, Autoneg); pl 275 drivers/net/phy/phylink.c phylink_set(pl->supported, Asym_Pause); pl 276 drivers/net/phy/phylink.c phylink_set(pl->supported, Pause); pl 277 drivers/net/phy/phylink.c pl->link_config.an_enabled = true; pl 278 drivers/net/phy/phylink.c pl->link_an_mode = MLO_AN_INBAND; pl 280 drivers/net/phy/phylink.c switch (pl->link_config.interface) { pl 282 drivers/net/phy/phylink.c phylink_set(pl->supported, 10baseT_Half); pl 283 drivers/net/phy/phylink.c phylink_set(pl->supported, 10baseT_Full); pl 284 drivers/net/phy/phylink.c phylink_set(pl->supported, 100baseT_Half); pl 285 drivers/net/phy/phylink.c phylink_set(pl->supported, 100baseT_Full); pl 286 drivers/net/phy/phylink.c phylink_set(pl->supported, 1000baseT_Half); pl 287 drivers/net/phy/phylink.c phylink_set(pl->supported, 1000baseT_Full); pl 291 drivers/net/phy/phylink.c phylink_set(pl->supported, 1000baseX_Full); pl 295 drivers/net/phy/phylink.c phylink_set(pl->supported, 2500baseX_Full); pl 299 drivers/net/phy/phylink.c phylink_set(pl->supported, 10baseT_Half); pl 300 drivers/net/phy/phylink.c phylink_set(pl->supported, 10baseT_Full); pl 301 drivers/net/phy/phylink.c phylink_set(pl->supported, 100baseT_Half); pl 302 drivers/net/phy/phylink.c phylink_set(pl->supported, 100baseT_Full); pl 303 drivers/net/phy/phylink.c phylink_set(pl->supported, 1000baseT_Half); pl 304 drivers/net/phy/phylink.c phylink_set(pl->supported, 1000baseT_Full); pl 305 drivers/net/phy/phylink.c phylink_set(pl->supported, 1000baseX_Full); pl 306 drivers/net/phy/phylink.c phylink_set(pl->supported, 10000baseKR_Full); pl 307 drivers/net/phy/phylink.c phylink_set(pl->supported, 10000baseCR_Full); pl 308 drivers/net/phy/phylink.c phylink_set(pl->supported, 10000baseSR_Full); pl 309 drivers/net/phy/phylink.c phylink_set(pl->supported, 10000baseLR_Full); pl 310 drivers/net/phy/phylink.c phylink_set(pl->supported, 10000baseLRM_Full); pl 311 drivers/net/phy/phylink.c phylink_set(pl->supported, 10000baseER_Full); pl 315 drivers/net/phy/phylink.c phylink_err(pl, pl 317 drivers/net/phy/phylink.c phy_modes(pl->link_config.interface)); pl 321 drivers/net/phy/phylink.c linkmode_copy(pl->link_config.advertising, pl->supported); pl 323 drivers/net/phy/phylink.c if (phylink_validate(pl, pl->supported, &pl->link_config)) { pl 324 drivers/net/phy/phylink.c phylink_err(pl, pl 333 drivers/net/phy/phylink.c static void phylink_mac_config(struct phylink *pl, pl 336 drivers/net/phy/phylink.c phylink_dbg(pl, pl 338 drivers/net/phy/phylink.c __func__, phylink_an_mode_str(pl->link_an_mode), pl 345 drivers/net/phy/phylink.c pl->ops->mac_config(pl->config, pl->link_an_mode, state); pl 348 drivers/net/phy/phylink.c static void phylink_mac_config_up(struct 
phylink *pl, pl 352 drivers/net/phy/phylink.c phylink_mac_config(pl, state); pl 355 drivers/net/phy/phylink.c static void phylink_mac_an_restart(struct phylink *pl) pl 357 drivers/net/phy/phylink.c if (pl->link_config.an_enabled && pl 358 drivers/net/phy/phylink.c phy_interface_mode_is_8023z(pl->link_config.interface)) pl 359 drivers/net/phy/phylink.c pl->ops->mac_an_restart(pl->config); pl 362 drivers/net/phy/phylink.c static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *state) pl 365 drivers/net/phy/phylink.c linkmode_copy(state->advertising, pl->link_config.advertising); pl 367 drivers/net/phy/phylink.c state->interface = pl->link_config.interface; pl 368 drivers/net/phy/phylink.c state->an_enabled = pl->link_config.an_enabled; pl 375 drivers/net/phy/phylink.c return pl->ops->mac_link_state(pl->config, state); pl 381 drivers/net/phy/phylink.c static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state) pl 383 drivers/net/phy/phylink.c *state = pl->link_config; pl 384 drivers/net/phy/phylink.c if (pl->get_fixed_state) pl 385 drivers/net/phy/phylink.c pl->get_fixed_state(pl->netdev, state); pl 386 drivers/net/phy/phylink.c else if (pl->link_gpio) pl 387 drivers/net/phy/phylink.c state->link = !!gpiod_get_value_cansleep(pl->link_gpio); pl 398 drivers/net/phy/phylink.c static void phylink_resolve_flow(struct phylink *pl, pl 403 drivers/net/phy/phylink.c if (pl->link_config.pause & MLO_PAUSE_AN) { pl 406 drivers/net/phy/phylink.c if (phylink_test(pl->link_config.advertising, Pause)) pl 408 drivers/net/phy/phylink.c if (phylink_test(pl->link_config.advertising, Asym_Pause)) pl 419 drivers/net/phy/phylink.c new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; pl 440 drivers/net/phy/phylink.c static void phylink_mac_link_up(struct phylink *pl, pl 443 drivers/net/phy/phylink.c struct net_device *ndev = pl->netdev; pl 445 drivers/net/phy/phylink.c pl->cur_interface = link_state.interface; pl 446 drivers/net/phy/phylink.c pl->ops->mac_link_up(pl->config, pl->link_an_mode, pl 447 drivers/net/phy/phylink.c pl->cur_interface, pl->phydev); pl 452 drivers/net/phy/phylink.c phylink_info(pl, pl 459 drivers/net/phy/phylink.c static void phylink_mac_link_down(struct phylink *pl) pl 461 drivers/net/phy/phylink.c struct net_device *ndev = pl->netdev; pl 465 drivers/net/phy/phylink.c pl->ops->mac_link_down(pl->config, pl->link_an_mode, pl 466 drivers/net/phy/phylink.c pl->cur_interface); pl 467 drivers/net/phy/phylink.c phylink_info(pl, "Link is Down\n"); pl 472 drivers/net/phy/phylink.c struct phylink *pl = container_of(w, struct phylink, resolve); pl 474 drivers/net/phy/phylink.c struct net_device *ndev = pl->netdev; pl 477 drivers/net/phy/phylink.c mutex_lock(&pl->state_mutex); pl 478 drivers/net/phy/phylink.c if (pl->phylink_disable_state) { pl 479 drivers/net/phy/phylink.c pl->mac_link_dropped = false; pl 481 drivers/net/phy/phylink.c } else if (pl->mac_link_dropped) { pl 484 drivers/net/phy/phylink.c switch (pl->link_an_mode) { pl 486 drivers/net/phy/phylink.c link_state = pl->phy_state; pl 487 drivers/net/phy/phylink.c phylink_resolve_flow(pl, &link_state); pl 488 drivers/net/phy/phylink.c phylink_mac_config_up(pl, &link_state); pl 492 drivers/net/phy/phylink.c phylink_get_fixed_state(pl, &link_state); pl 493 drivers/net/phy/phylink.c phylink_mac_config_up(pl, &link_state); pl 497 drivers/net/phy/phylink.c phylink_get_mac_state(pl, &link_state); pl 501 drivers/net/phy/phylink.c if (pl->phydev) pl 502 drivers/net/phy/phylink.c link_state.link 
&= pl->phy_state.link; pl 505 drivers/net/phy/phylink.c if (pl->phydev && pl->phy_state.link) { pl 506 drivers/net/phy/phylink.c link_state.interface = pl->phy_state.interface; pl 510 drivers/net/phy/phylink.c link_state.pause |= pl->phy_state.pause; pl 511 drivers/net/phy/phylink.c phylink_resolve_flow(pl, &link_state); pl 512 drivers/net/phy/phylink.c phylink_mac_config(pl, &link_state); pl 518 drivers/net/phy/phylink.c if (pl->netdev) pl 521 drivers/net/phy/phylink.c link_changed = (link_state.link != pl->old_link_state); pl 524 drivers/net/phy/phylink.c pl->old_link_state = link_state.link; pl 526 drivers/net/phy/phylink.c phylink_mac_link_down(pl); pl 528 drivers/net/phy/phylink.c phylink_mac_link_up(pl, link_state); pl 530 drivers/net/phy/phylink.c if (!link_state.link && pl->mac_link_dropped) { pl 531 drivers/net/phy/phylink.c pl->mac_link_dropped = false; pl 532 drivers/net/phy/phylink.c queue_work(system_power_efficient_wq, &pl->resolve); pl 534 drivers/net/phy/phylink.c mutex_unlock(&pl->state_mutex); pl 537 drivers/net/phy/phylink.c static void phylink_run_resolve(struct phylink *pl) pl 539 drivers/net/phy/phylink.c if (!pl->phylink_disable_state) pl 540 drivers/net/phy/phylink.c queue_work(system_power_efficient_wq, &pl->resolve); pl 543 drivers/net/phy/phylink.c static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) pl 545 drivers/net/phy/phylink.c unsigned long state = pl->phylink_disable_state; pl 547 drivers/net/phy/phylink.c set_bit(bit, &pl->phylink_disable_state); pl 549 drivers/net/phy/phylink.c queue_work(system_power_efficient_wq, &pl->resolve); pl 550 drivers/net/phy/phylink.c flush_work(&pl->resolve); pl 556 drivers/net/phy/phylink.c struct phylink *pl = container_of(t, struct phylink, link_poll); pl 560 drivers/net/phy/phylink.c phylink_run_resolve(pl); pl 565 drivers/net/phy/phylink.c static int phylink_register_sfp(struct phylink *pl, pl 580 drivers/net/phy/phylink.c phylink_err(pl, "unable to parse \"sfp\" node: %d\n", pl 585 drivers/net/phy/phylink.c pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl, &sfp_phylink_ops); pl 586 drivers/net/phy/phylink.c if (!pl->sfp_bus) pl 613 drivers/net/phy/phylink.c struct phylink *pl; pl 616 drivers/net/phy/phylink.c pl = kzalloc(sizeof(*pl), GFP_KERNEL); pl 617 drivers/net/phy/phylink.c if (!pl) pl 620 drivers/net/phy/phylink.c mutex_init(&pl->state_mutex); pl 621 drivers/net/phy/phylink.c INIT_WORK(&pl->resolve, phylink_resolve); pl 623 drivers/net/phy/phylink.c pl->config = config; pl 625 drivers/net/phy/phylink.c pl->netdev = to_net_dev(config->dev); pl 627 drivers/net/phy/phylink.c pl->dev = config->dev; pl 629 drivers/net/phy/phylink.c kfree(pl); pl 633 drivers/net/phy/phylink.c pl->phy_state.interface = iface; pl 634 drivers/net/phy/phylink.c pl->link_interface = iface; pl 636 drivers/net/phy/phylink.c pl->link_port = PORT_BNC; pl 638 drivers/net/phy/phylink.c pl->link_port = PORT_MII; pl 639 drivers/net/phy/phylink.c pl->link_config.interface = iface; pl 640 drivers/net/phy/phylink.c pl->link_config.pause = MLO_PAUSE_AN; pl 641 drivers/net/phy/phylink.c pl->link_config.speed = SPEED_UNKNOWN; pl 642 drivers/net/phy/phylink.c pl->link_config.duplex = DUPLEX_UNKNOWN; pl 643 drivers/net/phy/phylink.c pl->link_config.an_enabled = true; pl 644 drivers/net/phy/phylink.c pl->ops = ops; pl 645 drivers/net/phy/phylink.c __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); pl 646 drivers/net/phy/phylink.c timer_setup(&pl->link_poll, phylink_fixed_poll, 0); pl 648 drivers/net/phy/phylink.c 
bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); pl 649 drivers/net/phy/phylink.c linkmode_copy(pl->link_config.advertising, pl->supported); pl 650 drivers/net/phy/phylink.c phylink_validate(pl, pl->supported, &pl->link_config); pl 652 drivers/net/phy/phylink.c ret = phylink_parse_mode(pl, fwnode); pl 654 drivers/net/phy/phylink.c kfree(pl); pl 658 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_FIXED) { pl 659 drivers/net/phy/phylink.c ret = phylink_parse_fixedlink(pl, fwnode); pl 661 drivers/net/phy/phylink.c kfree(pl); pl 666 drivers/net/phy/phylink.c ret = phylink_register_sfp(pl, fwnode); pl 668 drivers/net/phy/phylink.c kfree(pl); pl 672 drivers/net/phy/phylink.c return pl; pl 685 drivers/net/phy/phylink.c void phylink_destroy(struct phylink *pl) pl 687 drivers/net/phy/phylink.c if (pl->sfp_bus) pl 688 drivers/net/phy/phylink.c sfp_unregister_upstream(pl->sfp_bus); pl 689 drivers/net/phy/phylink.c if (pl->link_gpio) pl 690 drivers/net/phy/phylink.c gpiod_put(pl->link_gpio); pl 692 drivers/net/phy/phylink.c cancel_work_sync(&pl->resolve); pl 693 drivers/net/phy/phylink.c kfree(pl); pl 700 drivers/net/phy/phylink.c struct phylink *pl = phydev->phylink; pl 702 drivers/net/phy/phylink.c mutex_lock(&pl->state_mutex); pl 703 drivers/net/phy/phylink.c pl->phy_state.speed = phydev->speed; pl 704 drivers/net/phy/phylink.c pl->phy_state.duplex = phydev->duplex; pl 705 drivers/net/phy/phylink.c pl->phy_state.pause = MLO_PAUSE_NONE; pl 707 drivers/net/phy/phylink.c pl->phy_state.pause |= MLO_PAUSE_SYM; pl 709 drivers/net/phy/phylink.c pl->phy_state.pause |= MLO_PAUSE_ASYM; pl 710 drivers/net/phy/phylink.c pl->phy_state.interface = phydev->interface; pl 711 drivers/net/phy/phylink.c pl->phy_state.link = up; pl 712 drivers/net/phy/phylink.c mutex_unlock(&pl->state_mutex); pl 714 drivers/net/phy/phylink.c phylink_run_resolve(pl); pl 716 drivers/net/phy/phylink.c phylink_dbg(pl, "phy link %s %s/%s/%s\n", up ? 
"up" : "down", pl 722 drivers/net/phy/phylink.c static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) pl 731 drivers/net/phy/phylink.c config.interface = pl->link_config.interface; pl 745 drivers/net/phy/phylink.c ret = phylink_validate(pl, supported, &config); pl 749 drivers/net/phy/phylink.c phy->phylink = pl; pl 752 drivers/net/phy/phylink.c phylink_info(pl, pl 757 drivers/net/phy/phylink.c mutex_lock(&pl->state_mutex); pl 758 drivers/net/phy/phylink.c pl->phydev = phy; pl 759 drivers/net/phy/phylink.c linkmode_copy(pl->supported, supported); pl 760 drivers/net/phy/phylink.c linkmode_copy(pl->link_config.advertising, config.advertising); pl 764 drivers/net/phy/phylink.c mutex_unlock(&pl->state_mutex); pl 767 drivers/net/phy/phylink.c phylink_dbg(pl, pl 769 drivers/net/phy/phylink.c __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported, pl 778 drivers/net/phy/phylink.c static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy, pl 783 drivers/net/phy/phylink.c if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED || pl 784 drivers/net/phy/phylink.c (pl->link_an_mode == MLO_AN_INBAND && pl 788 drivers/net/phy/phylink.c if (pl->phydev) pl 791 drivers/net/phy/phylink.c ret = phy_attach_direct(pl->netdev, phy, 0, interface); pl 795 drivers/net/phy/phylink.c ret = phylink_bringup_phy(pl, phy); pl 817 drivers/net/phy/phylink.c int phylink_connect_phy(struct phylink *pl, struct phy_device *phy) pl 820 drivers/net/phy/phylink.c if (pl->link_interface == PHY_INTERFACE_MODE_NA) { pl 821 drivers/net/phy/phylink.c pl->link_interface = phy->interface; pl 822 drivers/net/phy/phylink.c pl->link_config.interface = pl->link_interface; pl 825 drivers/net/phy/phylink.c return __phylink_connect_phy(pl, phy, pl->link_interface); pl 841 drivers/net/phy/phylink.c int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn, pl 849 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_FIXED || pl 850 drivers/net/phy/phylink.c (pl->link_an_mode == MLO_AN_INBAND && pl 851 drivers/net/phy/phylink.c phy_interface_mode_is_8023z(pl->link_interface))) pl 861 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_PHY) pl 866 drivers/net/phy/phylink.c phy_dev = of_phy_attach(pl->netdev, phy_node, flags, pl 867 drivers/net/phy/phylink.c pl->link_interface); pl 874 drivers/net/phy/phylink.c ret = phylink_bringup_phy(pl, phy_dev); pl 889 drivers/net/phy/phylink.c void phylink_disconnect_phy(struct phylink *pl) pl 895 drivers/net/phy/phylink.c phy = pl->phydev; pl 898 drivers/net/phy/phylink.c mutex_lock(&pl->state_mutex); pl 899 drivers/net/phy/phylink.c pl->phydev = NULL; pl 900 drivers/net/phy/phylink.c mutex_unlock(&pl->state_mutex); pl 902 drivers/net/phy/phylink.c flush_work(&pl->resolve); pl 917 drivers/net/phy/phylink.c int phylink_fixed_state_cb(struct phylink *pl, pl 924 drivers/net/phy/phylink.c if (pl->link_an_mode != MLO_AN_FIXED) pl 927 drivers/net/phy/phylink.c mutex_lock(&pl->state_mutex); pl 928 drivers/net/phy/phylink.c pl->get_fixed_state = cb; pl 929 drivers/net/phy/phylink.c mutex_unlock(&pl->state_mutex); pl 943 drivers/net/phy/phylink.c void phylink_mac_change(struct phylink *pl, bool up) pl 946 drivers/net/phy/phylink.c pl->mac_link_dropped = true; pl 947 drivers/net/phy/phylink.c phylink_run_resolve(pl); pl 948 drivers/net/phy/phylink.c phylink_dbg(pl, "mac link %s\n", up ? 
"up" : "down"); pl 954 drivers/net/phy/phylink.c struct phylink *pl = data; pl 956 drivers/net/phy/phylink.c phylink_run_resolve(pl); pl 969 drivers/net/phy/phylink.c void phylink_start(struct phylink *pl) pl 973 drivers/net/phy/phylink.c phylink_info(pl, "configuring for %s/%s link mode\n", pl 974 drivers/net/phy/phylink.c phylink_an_mode_str(pl->link_an_mode), pl 975 drivers/net/phy/phylink.c phy_modes(pl->link_config.interface)); pl 978 drivers/net/phy/phylink.c if (pl->netdev) pl 979 drivers/net/phy/phylink.c netif_carrier_off(pl->netdev); pl 985 drivers/net/phy/phylink.c phylink_resolve_flow(pl, &pl->link_config); pl 986 drivers/net/phy/phylink.c phylink_mac_config(pl, &pl->link_config); pl 992 drivers/net/phy/phylink.c phylink_mac_an_restart(pl); pl 994 drivers/net/phy/phylink.c clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); pl 995 drivers/net/phy/phylink.c phylink_run_resolve(pl); pl 997 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_FIXED && pl->link_gpio) { pl 998 drivers/net/phy/phylink.c int irq = gpiod_to_irq(pl->link_gpio); pl 1004 drivers/net/phy/phylink.c "netdev link", pl)) pl 1005 drivers/net/phy/phylink.c pl->link_irq = irq; pl 1010 drivers/net/phy/phylink.c mod_timer(&pl->link_poll, jiffies + HZ); pl 1012 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) pl 1013 drivers/net/phy/phylink.c mod_timer(&pl->link_poll, jiffies + HZ); pl 1014 drivers/net/phy/phylink.c if (pl->phydev) pl 1015 drivers/net/phy/phylink.c phy_start(pl->phydev); pl 1016 drivers/net/phy/phylink.c if (pl->sfp_bus) pl 1017 drivers/net/phy/phylink.c sfp_upstream_start(pl->sfp_bus); pl 1030 drivers/net/phy/phylink.c void phylink_stop(struct phylink *pl) pl 1034 drivers/net/phy/phylink.c if (pl->sfp_bus) pl 1035 drivers/net/phy/phylink.c sfp_upstream_stop(pl->sfp_bus); pl 1036 drivers/net/phy/phylink.c if (pl->phydev) pl 1037 drivers/net/phy/phylink.c phy_stop(pl->phydev); pl 1038 drivers/net/phy/phylink.c del_timer_sync(&pl->link_poll); pl 1039 drivers/net/phy/phylink.c if (pl->link_irq) { pl 1040 drivers/net/phy/phylink.c free_irq(pl->link_irq, pl); pl 1041 drivers/net/phy/phylink.c pl->link_irq = 0; pl 1044 drivers/net/phy/phylink.c phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED); pl 1057 drivers/net/phy/phylink.c void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol) pl 1064 drivers/net/phy/phylink.c if (pl->phydev) pl 1065 drivers/net/phy/phylink.c phy_ethtool_get_wol(pl->phydev, wol); pl 1080 drivers/net/phy/phylink.c int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol) pl 1086 drivers/net/phy/phylink.c if (pl->phydev) pl 1087 drivers/net/phy/phylink.c ret = phy_ethtool_set_wol(pl->phydev, wol); pl 1124 drivers/net/phy/phylink.c int phylink_ethtool_ksettings_get(struct phylink *pl, pl 1131 drivers/net/phy/phylink.c if (pl->phydev) { pl 1132 drivers/net/phy/phylink.c phy_ethtool_ksettings_get(pl->phydev, kset); pl 1134 drivers/net/phy/phylink.c kset->base.port = pl->link_port; pl 1137 drivers/net/phy/phylink.c linkmode_copy(kset->link_modes.supported, pl->supported); pl 1139 drivers/net/phy/phylink.c switch (pl->link_an_mode) { pl 1145 drivers/net/phy/phylink.c phylink_get_fixed_state(pl, &link_state); pl 1153 drivers/net/phy/phylink.c if (pl->phydev) pl 1156 drivers/net/phy/phylink.c phylink_get_mac_state(pl, &link_state); pl 1175 drivers/net/phy/phylink.c int phylink_ethtool_ksettings_set(struct phylink *pl, pl 1189 drivers/net/phy/phylink.c linkmode_copy(support, 
pl->supported); pl 1190 drivers/net/phy/phylink.c config = pl->link_config; pl 1211 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_FIXED && pl 1212 drivers/net/phy/phylink.c (s->speed != pl->link_config.speed || pl 1213 drivers/net/phy/phylink.c s->duplex != pl->link_config.duplex)) pl 1223 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_FIXED) pl 1233 drivers/net/phy/phylink.c if (phylink_validate(pl, support, &config)) pl 1246 drivers/net/phy/phylink.c if (pl->phydev) { pl 1247 drivers/net/phy/phylink.c ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset); pl 1252 drivers/net/phy/phylink.c mutex_lock(&pl->state_mutex); pl 1254 drivers/net/phy/phylink.c linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); pl 1255 drivers/net/phy/phylink.c pl->link_config.interface = config.interface; pl 1256 drivers/net/phy/phylink.c pl->link_config.speed = our_kset.base.speed; pl 1257 drivers/net/phy/phylink.c pl->link_config.duplex = our_kset.base.duplex; pl 1258 drivers/net/phy/phylink.c pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; pl 1265 drivers/net/phy/phylink.c if (pl->link_an_mode == MLO_AN_INBAND && pl 1266 drivers/net/phy/phylink.c !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { pl 1267 drivers/net/phy/phylink.c phylink_mac_config(pl, &pl->link_config); pl 1268 drivers/net/phy/phylink.c phylink_mac_an_restart(pl); pl 1270 drivers/net/phy/phylink.c mutex_unlock(&pl->state_mutex); pl 1287 drivers/net/phy/phylink.c int phylink_ethtool_nway_reset(struct phylink *pl) pl 1293 drivers/net/phy/phylink.c if (pl->phydev) pl 1294 drivers/net/phy/phylink.c ret = phy_restart_aneg(pl->phydev); pl 1295 drivers/net/phy/phylink.c phylink_mac_an_restart(pl); pl 1306 drivers/net/phy/phylink.c void phylink_ethtool_get_pauseparam(struct phylink *pl, pl 1311 drivers/net/phy/phylink.c pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN); pl 1312 drivers/net/phy/phylink.c pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX); pl 1313 drivers/net/phy/phylink.c pause->tx_pause = !!(pl->link_config.pause & MLO_PAUSE_TX); pl 1322 drivers/net/phy/phylink.c int phylink_ethtool_set_pauseparam(struct phylink *pl, pl 1325 drivers/net/phy/phylink.c struct phylink_link_state *config = &pl->link_config; pl 1329 drivers/net/phy/phylink.c if (!phylink_test(pl->supported, Pause) && pl 1330 drivers/net/phy/phylink.c !phylink_test(pl->supported, Asym_Pause)) pl 1333 drivers/net/phy/phylink.c if (!phylink_test(pl->supported, Asym_Pause) && pl 1350 drivers/net/phy/phylink.c if (pl->phydev) { pl 1351 drivers/net/phy/phylink.c phy_set_asym_pause(pl->phydev, pause->rx_pause, pl 1354 drivers/net/phy/phylink.c &pl->phylink_disable_state)) { pl 1355 drivers/net/phy/phylink.c switch (pl->link_an_mode) { pl 1358 drivers/net/phy/phylink.c phylink_resolve_flow(pl, config); pl 1359 drivers/net/phy/phylink.c phylink_mac_config(pl, config); pl 1363 drivers/net/phy/phylink.c phylink_mac_config(pl, config); pl 1364 drivers/net/phy/phylink.c phylink_mac_an_restart(pl); pl 1383 drivers/net/phy/phylink.c int phylink_get_eee_err(struct phylink *pl) pl 1389 drivers/net/phy/phylink.c if (pl->phydev) pl 1390 drivers/net/phy/phylink.c ret = phy_get_eee_err(pl->phydev); pl 1403 drivers/net/phy/phylink.c int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) pl 1407 drivers/net/phy/phylink.c if (pl->phydev) pl 1408 drivers/net/phy/phylink.c ret = phy_init_eee(pl->phydev, clk_stop_enable); pl 1419 drivers/net/phy/phylink.c int phylink_ethtool_get_eee(struct 
phylink *pl, struct ethtool_eee *eee) pl 1425 drivers/net/phy/phylink.c if (pl->phydev) pl 1426 drivers/net/phy/phylink.c ret = phy_ethtool_get_eee(pl->phydev, eee); pl 1437 drivers/net/phy/phylink.c int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee) pl 1443 drivers/net/phy/phylink.c if (pl->phydev) pl 1444 drivers/net/phy/phylink.c ret = phy_ethtool_set_eee(pl->phydev, eee); pl 1475 drivers/net/phy/phylink.c static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, pl 1478 drivers/net/phy/phylink.c struct phy_device *phydev = pl->phydev; pl 1512 drivers/net/phy/phylink.c return mdiobus_read(pl->phydev->mdio.bus, prtad, devad); pl 1515 drivers/net/phy/phylink.c static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, pl 1518 drivers/net/phy/phylink.c struct phy_device *phydev = pl->phydev; pl 1556 drivers/net/phy/phylink.c static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, pl 1562 drivers/net/phy/phylink.c switch (pl->link_an_mode) { pl 1565 drivers/net/phy/phylink.c phylink_get_fixed_state(pl, &state); pl 1575 drivers/net/phy/phylink.c val = phylink_get_mac_state(pl, &state); pl 1587 drivers/net/phy/phylink.c static int phylink_mii_write(struct phylink *pl, unsigned int phy_id, pl 1590 drivers/net/phy/phylink.c switch (pl->link_an_mode) { pl 1622 drivers/net/phy/phylink.c int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) pl 1629 drivers/net/phy/phylink.c if (pl->phydev) { pl 1633 drivers/net/phy/phylink.c mii->phy_id = pl->phydev->mdio.addr; pl 1637 drivers/net/phy/phylink.c ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); pl 1645 drivers/net/phy/phylink.c ret = phylink_phy_write(pl, mii->phy_id, mii->reg_num, pl 1650 drivers/net/phy/phylink.c ret = phy_mii_ioctl(pl->phydev, ifr, cmd); pl 1660 drivers/net/phy/phylink.c ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); pl 1668 drivers/net/phy/phylink.c ret = phylink_mii_write(pl, mii->phy_id, mii->reg_num, pl 1684 drivers/net/phy/phylink.c struct phylink *pl = upstream; pl 1686 drivers/net/phy/phylink.c pl->netdev->sfp_bus = bus; pl 1691 drivers/net/phy/phylink.c struct phylink *pl = upstream; pl 1693 drivers/net/phy/phylink.c pl->netdev->sfp_bus = NULL; pl 1699 drivers/net/phy/phylink.c struct phylink *pl = upstream; pl 1710 drivers/net/phy/phylink.c sfp_parse_support(pl->sfp_bus, id, support); pl 1711 drivers/net/phy/phylink.c port = sfp_parse_port(pl->sfp_bus, id, support); pl 1719 drivers/net/phy/phylink.c config.an_enabled = pl->link_config.an_enabled; pl 1722 drivers/net/phy/phylink.c ret = phylink_validate(pl, support, &config); pl 1724 drivers/net/phy/phylink.c phylink_err(pl, "validation with support %*pb failed: %d\n", pl 1731 drivers/net/phy/phylink.c iface = sfp_select_interface(pl->sfp_bus, id, config.advertising); pl 1733 drivers/net/phy/phylink.c phylink_err(pl, pl 1740 drivers/net/phy/phylink.c ret = phylink_validate(pl, support1, &config); pl 1742 drivers/net/phy/phylink.c phylink_err(pl, "validation of %s/%s with support %*pb failed: %d\n", pl 1749 drivers/net/phy/phylink.c phylink_dbg(pl, "requesting link mode %s/%s with support %*pb\n", pl 1754 drivers/net/phy/phylink.c if (phy_interface_mode_is_8023z(iface) && pl->phydev) pl 1757 drivers/net/phy/phylink.c changed = !bitmap_equal(pl->supported, support, pl 1760 drivers/net/phy/phylink.c linkmode_copy(pl->supported, support); pl 1761 drivers/net/phy/phylink.c linkmode_copy(pl->link_config.advertising, config.advertising); pl 1764 drivers/net/phy/phylink.c if (pl->link_an_mode 
!= MLO_AN_INBAND || pl 1765 drivers/net/phy/phylink.c pl->link_config.interface != config.interface) { pl 1766 drivers/net/phy/phylink.c pl->link_config.interface = config.interface; pl 1767 drivers/net/phy/phylink.c pl->link_an_mode = MLO_AN_INBAND; pl 1771 drivers/net/phy/phylink.c phylink_info(pl, "switched to %s/%s link mode\n", pl 1776 drivers/net/phy/phylink.c pl->link_port = port; pl 1779 drivers/net/phy/phylink.c &pl->phylink_disable_state)) pl 1780 drivers/net/phy/phylink.c phylink_mac_config(pl, &pl->link_config); pl 1787 drivers/net/phy/phylink.c struct phylink *pl = upstream; pl 1791 drivers/net/phy/phylink.c phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); pl 1796 drivers/net/phy/phylink.c struct phylink *pl = upstream; pl 1800 drivers/net/phy/phylink.c clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); pl 1801 drivers/net/phy/phylink.c phylink_run_resolve(pl); pl 1806 drivers/net/phy/phylink.c struct phylink *pl = upstream; pl 1808 drivers/net/phy/phylink.c return __phylink_connect_phy(upstream, phy, pl->link_config.interface); pl 493 drivers/net/wimax/i2400m/control.c while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl, pl 961 drivers/net/wimax/i2400m/control.c tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack), pl 1045 drivers/net/wimax/i2400m/control.c tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack), pl 271 drivers/pci/pci.h bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, pl 273 drivers/pci/pci.h bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl, pl 275 drivers/pci/pci.h int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout); pl 66 drivers/phy/phy-core.c struct phy_lookup *pl; pl 71 drivers/phy/phy-core.c pl = kzalloc(sizeof(*pl), GFP_KERNEL); pl 72 drivers/phy/phy-core.c if (!pl) pl 75 drivers/phy/phy-core.c pl->dev_id = dev_id; pl 76 drivers/phy/phy-core.c pl->con_id = con_id; pl 77 drivers/phy/phy-core.c pl->phy = phy; pl 80 drivers/phy/phy-core.c list_add_tail(&pl->node, &phys); pl 98 drivers/phy/phy-core.c struct phy_lookup *pl; pl 104 drivers/phy/phy-core.c list_for_each_entry(pl, &phys, node) pl 105 drivers/phy/phy-core.c if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) && pl 106 drivers/phy/phy-core.c !strcmp(pl->con_id, con_id)) { pl 107 drivers/phy/phy-core.c list_del(&pl->node); pl 108 drivers/phy/phy-core.c kfree(pl); pl 118 drivers/phy/phy-core.c struct phy_lookup *p, *pl = NULL; pl 123 drivers/phy/phy-core.c pl = p; pl 128 drivers/phy/phy-core.c return pl ? 
pl->phy : ERR_PTR(-ENODEV); pl 43 drivers/s390/scsi/zfcp_dbf.c struct zfcp_dbf_pay *pl = &dbf->pay_buf; pl 47 drivers/s390/scsi/zfcp_dbf.c memset(pl, 0, sizeof(*pl)); pl 48 drivers/s390/scsi/zfcp_dbf.c pl->fsf_req_id = req_id; pl 49 drivers/s390/scsi/zfcp_dbf.c memcpy(pl->area, area, ZFCP_DBF_TAG_LEN); pl 54 drivers/s390/scsi/zfcp_dbf.c memcpy(pl->data, data + offset, rec_length); pl 55 drivers/s390/scsi/zfcp_dbf.c debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length)); pl 58 drivers/s390/scsi/zfcp_dbf.c pl->counter++; pl 192 drivers/s390/scsi/zfcp_dbf.c void **pl) pl 203 drivers/s390/scsi/zfcp_dbf.c if (!pl) pl 215 drivers/s390/scsi/zfcp_dbf.c while (payload->counter < scount && (char *)pl[payload->counter]) { pl 216 drivers/s390/scsi/zfcp_dbf.c memcpy(payload->data, (char *)pl[payload->counter], length); pl 93 drivers/s390/scsi/zfcp_qdio.c void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1]; pl 98 drivers/s390/scsi/zfcp_qdio.c memset(pl, 0, pl 109 drivers/s390/scsi/zfcp_qdio.c pl[sbal_no] = qdio->res_q[sbal_idx]; pl 111 drivers/s390/scsi/zfcp_qdio.c zfcp_dbf_hba_def_err(adapter, req_id, scount, pl); pl 162 drivers/scsi/bfa/bfi.h u32 pl[BFI_LMSG_PL_WSZ]; pl 171 drivers/scsi/bfa/bfi.h u32 pl[BFI_MBMSG_SZ]; pl 2683 drivers/scsi/csiostor/csio_hw.c uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A); pl 2711 drivers/scsi/csiostor/csio_hw.c pl &= (~SF_F); pl 2712 drivers/scsi/csiostor/csio_hw.c csio_wr_reg32(hw, pl, PL_INT_ENABLE_A); pl 1840 drivers/scsi/libfc/fc_rport.c struct fc_els_flogi *pl; pl 1850 drivers/scsi/libfc/fc_rport.c pl = fc_frame_payload_get(fp, sizeof(*pl)); pl 1851 drivers/scsi/libfc/fc_rport.c if (!pl) { pl 1871 drivers/scsi/libfc/fc_rport.c rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn); pl 1872 drivers/scsi/libfc/fc_rport.c rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn); pl 1929 drivers/scsi/libfc/fc_rport.c rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs); pl 1934 drivers/scsi/libfc/fc_rport.c fp = fc_frame_alloc(lport, sizeof(*pl)); pl 1499 drivers/staging/media/ipu3/ipu3-css-params.c imgu_css_acc_process_lines(const struct process_lines *pl, pl 1507 drivers/staging/media/ipu3/ipu3-css-params.c pl->y_start + pl->grid_height * pl->block_height; pl 1509 drivers/staging/media/ipu3/ipu3-css-params.c pl->grid_height_per_slice * pl->block_height; pl 1518 drivers/staging/media/ipu3/ipu3-css-params.c if (pl->grid_height_per_slice == 0) pl 1521 drivers/staging/media/ipu3/ipu3-css-params.c if (pl->acc_enable && grid_last_line > pl->image_height) pl 1524 drivers/staging/media/ipu3/ipu3-css-params.c num_of_sets = pl->grid_height / pl->grid_height_per_slice; pl 1525 drivers/staging/media/ipu3/ipu3-css-params.c if (num_of_sets * pl->grid_height_per_slice < pl->grid_height) pl 1529 drivers/staging/media/ipu3/ipu3-css-params.c if (pl->max_op == IMGU_ABI_AF_MAX_OPERATIONS) { pl 1530 drivers/staging/media/ipu3/ipu3-css-params.c first_process_lines = process_lines + pl->y_start + 2; pl 1535 drivers/staging/media/ipu3/ipu3-css-params.c pl->image_height - grid_last_line - 4; pl 1537 drivers/staging/media/ipu3/ipu3-css-params.c first_process_lines = process_lines + pl->y_start; pl 1541 drivers/staging/media/ipu3/ipu3-css-params.c process_lines_after_grid = pl->image_height - grid_last_line; pl 1551 drivers/staging/media/ipu3/ipu3-css-params.c if (op_idx >= pl->max_op || tr_idx >= pl->max_tr) pl 1591 drivers/staging/media/ipu3/ipu3-css-params.c if (op_idx >= pl->max_op || pl_idx >= pl->max_tr) pl 1645 drivers/staging/media/ipu3/ipu3-css-params.c struct process_lines pl = { 
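The drivers/staging/media/ipu3/ipu3-css-params.c entries above quote imgu_css_acc_process_lines(), which walks a statistics grid in slices: the grid's last image line is y_start + grid_height * block_height, and the grid is consumed in sets of grid_height_per_slice rows, rounding up when the division leaves a remainder. A minimal stand-alone sketch of that arithmetic follows; struct process_lines_sketch and num_sets() are illustrative names, not the driver's API, though the field names mirror the quoted struct process_lines.

#include <stdio.h>

/* Stand-alone sketch of the slice arithmetic in the quoted
 * imgu_css_acc_process_lines(): a statistics grid of grid_height rows,
 * each block_height image lines tall, is processed in sets of
 * grid_height_per_slice rows. */
struct process_lines_sketch {
    unsigned int image_height;
    unsigned int y_start;
    unsigned int grid_height;
    unsigned int block_height;
    unsigned int grid_height_per_slice;
};

static int num_sets(const struct process_lines_sketch *pl)
{
    unsigned int grid_last_line =
        pl->y_start + pl->grid_height * pl->block_height;
    unsigned int n;

    if (pl->grid_height_per_slice == 0)
        return -1;   /* rejected up front, as in the quoted check */
    if (grid_last_line > pl->image_height)
        return -1;   /* grid must fit (the driver gates this on acc_enable) */

    /* One set per grid_height_per_slice rows, rounding up when the
     * division leaves a remainder -- the quoted num_of_sets computation. */
    n = pl->grid_height / pl->grid_height_per_slice;
    if (n * pl->grid_height_per_slice < pl->grid_height)
        n++;
    return (int)n;
}

int main(void)
{
    struct process_lines_sketch pl = {
        .image_height = 1080, .y_start = 2,
        .grid_height = 16, .block_height = 64,
        .grid_height_per_slice = 3,
    };

    printf("sets = %d\n", num_sets(&pl));   /* 16/3 = 5, remainder -> 6 */
    return 0;
}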
pl 1659 drivers/staging/media/ipu3/ipu3-css-params.c return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data, pl 1672 drivers/staging/media/ipu3/ipu3-css-params.c struct process_lines pl = { pl 1686 drivers/staging/media/ipu3/ipu3-css-params.c return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data, pl 1699 drivers/staging/media/ipu3/ipu3-css-params.c struct process_lines pl = { pl 1712 drivers/staging/media/ipu3/ipu3-css-params.c return imgu_css_acc_process_lines(&pl, to->ops, to->process_lines_data, pl 770 drivers/staging/rtl8712/rtl871x_ioctl_linux.c struct RT_PMKID_LIST *pl = psecuritypriv->PMKIDList; pl 795 drivers/staging/rtl8712/rtl871x_ioctl_linux.c if (!memcmp(pl[j].Bssid, strIssueBssid, ETH_ALEN)) { pl 801 drivers/staging/rtl8712/rtl871x_ioctl_linux.c memcpy(pl[j].PMKID, pPMK->pmkid, IW_PMKID_LEN); pl 802 drivers/staging/rtl8712/rtl871x_ioctl_linux.c pl[j].bUsed = true; pl 812 drivers/staging/rtl8712/rtl871x_ioctl_linux.c memcpy(pl[psecuritypriv->PMKIDIndex].Bssid, pl 814 drivers/staging/rtl8712/rtl871x_ioctl_linux.c memcpy(pl[psecuritypriv->PMKIDIndex].PMKID, pl 816 drivers/staging/rtl8712/rtl871x_ioctl_linux.c pl[psecuritypriv->PMKIDIndex].bUsed = true; pl 825 drivers/staging/rtl8712/rtl871x_ioctl_linux.c if (!memcmp(pl[j].Bssid, strIssueBssid, ETH_ALEN)) { pl 829 drivers/staging/rtl8712/rtl871x_ioctl_linux.c eth_zero_addr(pl[j].Bssid); pl 830 drivers/staging/rtl8712/rtl871x_ioctl_linux.c pl[j].bUsed = false; pl 197 drivers/target/iscsi/iscsi_target_parameters.c struct iscsi_param_list *pl; pl 199 drivers/target/iscsi/iscsi_target_parameters.c pl = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); pl 200 drivers/target/iscsi/iscsi_target_parameters.c if (!pl) { pl 205 drivers/target/iscsi/iscsi_target_parameters.c INIT_LIST_HEAD(&pl->param_list); pl 206 drivers/target/iscsi/iscsi_target_parameters.c INIT_LIST_HEAD(&pl->extra_response_list); pl 219 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, AUTHMETHOD, INITIAL_AUTHMETHOD, pl 225 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, HEADERDIGEST, INITIAL_HEADERDIGEST, pl 231 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, DATADIGEST, INITIAL_DATADIGEST, pl 237 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, MAXCONNECTIONS, pl 244 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, SENDTARGETS, INITIAL_SENDTARGETS, pl 250 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, TARGETNAME, INITIAL_TARGETNAME, pl 256 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, INITIATORNAME, pl 263 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, TARGETALIAS, INITIAL_TARGETALIAS, pl 269 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, INITIATORALIAS, pl 276 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, TARGETADDRESS, pl 283 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, TARGETPORTALGROUPTAG, pl 290 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, INITIALR2T, INITIAL_INITIALR2T, pl 296 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, IMMEDIATEDATA, pl 303 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, MAXXMITDATASEGMENTLENGTH, pl 310 
drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, MAXRECVDATASEGMENTLENGTH, pl 317 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, MAXBURSTLENGTH, pl 324 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, FIRSTBURSTLENGTH, pl 331 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, DEFAULTTIME2WAIT, pl 338 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, DEFAULTTIME2RETAIN, pl 345 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, MAXOUTSTANDINGR2T, pl 352 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, DATAPDUINORDER, pl 359 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, DATASEQUENCEINORDER, pl 366 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, ERRORRECOVERYLEVEL, pl 373 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, SESSIONTYPE, INITIAL_SESSIONTYPE, pl 379 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, IFMARKER, INITIAL_IFMARKER, pl 385 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, OFMARKER, INITIAL_OFMARKER, pl 391 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, IFMARKINT, INITIAL_IFMARKINT, pl 397 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, OFMARKINT, INITIAL_OFMARKINT, pl 406 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS, pl 412 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH, pl 419 drivers/target/iscsi/iscsi_target_parameters.c param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH, pl 426 drivers/target/iscsi/iscsi_target_parameters.c *param_list_ptr = pl; pl 429 drivers/target/iscsi/iscsi_target_parameters.c iscsi_release_param_list(pl); pl 872 drivers/thermal/tegra/soctherm.c u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0; pl 883 drivers/thermal/tegra/soctherm.c pl |= st & TH_INTR_PD0_MASK; pl 884 drivers/thermal/tegra/soctherm.c pl |= st & TH_INTR_PU0_MASK; pl 889 drivers/thermal/tegra/soctherm.c ex |= cp | gp | pl | me; pl 906 drivers/thermal/tegra/soctherm.c if (pl) { pl 2053 drivers/video/fbdev/amifb.c u_long pl, ps; pl 2058 drivers/video/fbdev/amifb.c ps = pl = ZTWO_PADDR(dummysprite); pl 2068 drivers/video/fbdev/amifb.c pl = ZTWO_PADDR(lofsprite); pl 2079 drivers/video/fbdev/amifb.c swap(pl, ps); pl 2089 drivers/video/fbdev/amifb.c copl[cop_spr0ptrh].w[1] = highw(pl); pl 2090 drivers/video/fbdev/amifb.c copl[cop_spr0ptrl].w[1] = loww(pl); pl 3766 fs/jfs/jfs_dtree.c wchar_t *pl, *pr, *kname; pl 3800 fs/jfs/jfs_dtree.c for (pl = lkey.name, pr = rkey.name; pl 3801 fs/jfs/jfs_dtree.c namlen; pl++, pr++, namlen--, klen++, kname++) { pl 3803 fs/jfs/jfs_dtree.c if (*pl != *pr) { pl 436 include/acpi/acoutput.h #define ACPI_DEBUG_PRINT(pl) pl 437 include/acpi/acoutput.h #define ACPI_DEBUG_PRINT_RAW(pl) pl 99 include/drm/drm_gem_vram_helper.h struct ttm_placement *pl); pl 21 include/linux/ceph/pagelist.h struct ceph_pagelist *pl; /* pagelist, for error checking */ pl 28 include/linux/ceph/pagelist.h extern void ceph_pagelist_release(struct ceph_pagelist *pl); pl 30 include/linux/ceph/pagelist.h extern int ceph_pagelist_append(struct ceph_pagelist *pl, const 
void *d, size_t l); pl 32 include/linux/ceph/pagelist.h extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); pl 34 include/linux/ceph/pagelist.h extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); pl 36 include/linux/ceph/pagelist.h extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, pl 39 include/linux/ceph/pagelist.h extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, pl 42 include/linux/ceph/pagelist.h static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) pl 45 include/linux/ceph/pagelist.h return ceph_pagelist_append(pl, &ev, sizeof(ev)); pl 47 include/linux/ceph/pagelist.h static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v) pl 50 include/linux/ceph/pagelist.h return ceph_pagelist_append(pl, &ev, sizeof(ev)); pl 52 include/linux/ceph/pagelist.h static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v) pl 55 include/linux/ceph/pagelist.h return ceph_pagelist_append(pl, &ev, sizeof(ev)); pl 57 include/linux/ceph/pagelist.h static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v) pl 59 include/linux/ceph/pagelist.h return ceph_pagelist_append(pl, &v, 1); pl 61 include/linux/ceph/pagelist.h static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl, pl 64 include/linux/ceph/pagelist.h int ret = ceph_pagelist_encode_32(pl, len); pl 68 include/linux/ceph/pagelist.h return ceph_pagelist_append(pl, s, len); pl 43 include/linux/dm-io.h struct page_list *pl; pl 56 include/linux/flex_proportions.h int fprop_local_init_single(struct fprop_local_single *pl); pl 57 include/linux/flex_proportions.h void fprop_local_destroy_single(struct fprop_local_single *pl); pl 58 include/linux/flex_proportions.h void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl); pl 60 include/linux/flex_proportions.h struct fprop_local_single *pl, unsigned long *numerator, pl 64 include/linux/flex_proportions.h void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) pl 69 include/linux/flex_proportions.h __fprop_inc_single(p, pl); pl 84 include/linux/flex_proportions.h int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp); pl 85 include/linux/flex_proportions.h void fprop_local_destroy_percpu(struct fprop_local_percpu *pl); pl 86 include/linux/flex_proportions.h void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl); pl 87 include/linux/flex_proportions.h void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl, pl 90 include/linux/flex_proportions.h struct fprop_local_percpu *pl, unsigned long *numerator, pl 94 include/linux/flex_proportions.h void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl) pl 99 include/linux/flex_proportions.h __fprop_inc_percpu(p, pl); pl 57 include/linux/lightnvm.h u64 pl : NVM_12_PL_BITS; pl 453 include/linux/lightnvm.h l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; pl 482 include/linux/lightnvm.h l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; pl 506 include/linux/lightnvm.h caddr |= (u64)p.g.pl << ppaf->pln_offset; pl 541 include/linux/lightnvm.h ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> pl 582 include/linux/lightnvm.h ppa32 |= ppa64.g.pl << ppaf->pln_offset; pl 613 include/linux/lightnvm.h int pl = ppa->g.pl; pl 616 include/linux/lightnvm.h pl++; pl 617 include/linux/lightnvm.h if (pl == geo->num_pln) pl 620 include/linux/lightnvm.h ppa->g.pl = pl; pl 194 include/net/dsa.h struct phylink *pl; pl 427 
include/uapi/linux/wimax/i2400m.h __u8 pl[0]; pl 438 include/uapi/linux/wimax/i2400m.h struct i2400m_tlv_hdr pl[0]; pl 48 kernel/bpf/cgroup.c struct bpf_prog_list *pl, *tmp; pl 50 kernel/bpf/cgroup.c list_for_each_entry_safe(pl, tmp, progs, node) { pl 51 kernel/bpf/cgroup.c list_del(&pl->node); pl 52 kernel/bpf/cgroup.c bpf_prog_put(pl->prog); pl 54 kernel/bpf/cgroup.c bpf_cgroup_storage_unlink(pl->storage[stype]); pl 55 kernel/bpf/cgroup.c bpf_cgroup_storage_free(pl->storage[stype]); pl 57 kernel/bpf/cgroup.c kfree(pl); pl 93 kernel/bpf/cgroup.c struct bpf_prog_list *pl; pl 96 kernel/bpf/cgroup.c list_for_each_entry(pl, head, node) { pl 97 kernel/bpf/cgroup.c if (!pl->prog) pl 144 kernel/bpf/cgroup.c struct bpf_prog_list *pl; pl 166 kernel/bpf/cgroup.c list_for_each_entry(pl, &p->bpf.progs[type], node) { pl 167 kernel/bpf/cgroup.c if (!pl->prog) pl 170 kernel/bpf/cgroup.c progs->items[cnt].prog = pl->prog; pl 173 kernel/bpf/cgroup.c pl->storage[stype]; pl 309 kernel/bpf/cgroup.c struct bpf_prog_list *pl; pl 341 kernel/bpf/cgroup.c list_for_each_entry(pl, progs, node) { pl 342 kernel/bpf/cgroup.c if (pl->prog == prog) { pl 350 kernel/bpf/cgroup.c pl = kmalloc(sizeof(*pl), GFP_KERNEL); pl 351 kernel/bpf/cgroup.c if (!pl) { pl 358 kernel/bpf/cgroup.c pl->prog = prog; pl 360 kernel/bpf/cgroup.c pl->storage[stype] = storage[stype]; pl 361 kernel/bpf/cgroup.c list_add_tail(&pl->node, progs); pl 364 kernel/bpf/cgroup.c pl = kmalloc(sizeof(*pl), GFP_KERNEL); pl 365 kernel/bpf/cgroup.c if (!pl) { pl 371 kernel/bpf/cgroup.c list_add_tail(&pl->node, progs); pl 373 kernel/bpf/cgroup.c pl = list_first_entry(progs, typeof(*pl), node); pl 374 kernel/bpf/cgroup.c old_prog = pl->prog; pl 376 kernel/bpf/cgroup.c old_storage[stype] = pl->storage[stype]; pl 381 kernel/bpf/cgroup.c pl->prog = prog; pl 383 kernel/bpf/cgroup.c pl->storage[stype] = storage[stype]; pl 408 kernel/bpf/cgroup.c pl->prog = old_prog; pl 410 kernel/bpf/cgroup.c bpf_cgroup_storage_free(pl->storage[stype]); pl 411 kernel/bpf/cgroup.c pl->storage[stype] = old_storage[stype]; pl 415 kernel/bpf/cgroup.c list_del(&pl->node); pl 416 kernel/bpf/cgroup.c kfree(pl); pl 437 kernel/bpf/cgroup.c struct bpf_prog_list *pl; pl 454 kernel/bpf/cgroup.c list_for_each_entry(pl, progs, node) { pl 455 kernel/bpf/cgroup.c if (pl->prog != prog) pl 461 kernel/bpf/cgroup.c pl->prog = NULL; pl 470 kernel/bpf/cgroup.c pl = list_first_entry(progs, typeof(*pl), node); pl 471 kernel/bpf/cgroup.c old_prog = pl->prog; pl 472 kernel/bpf/cgroup.c pl->prog = NULL; pl 480 kernel/bpf/cgroup.c list_del(&pl->node); pl 482 kernel/bpf/cgroup.c bpf_cgroup_storage_unlink(pl->storage[stype]); pl 483 kernel/bpf/cgroup.c bpf_cgroup_storage_free(pl->storage[stype]); pl 485 kernel/bpf/cgroup.c kfree(pl); pl 496 kernel/bpf/cgroup.c pl->prog = old_prog; pl 534 kernel/bpf/cgroup.c struct bpf_prog_list *pl; pl 538 kernel/bpf/cgroup.c list_for_each_entry(pl, progs, node) { pl 539 kernel/bpf/cgroup.c id = pl->prog->aux->id; pl 94 lib/flex_proportions.c int fprop_local_init_single(struct fprop_local_single *pl) pl 96 lib/flex_proportions.c pl->events = 0; pl 97 lib/flex_proportions.c pl->period = 0; pl 98 lib/flex_proportions.c raw_spin_lock_init(&pl->lock); pl 102 lib/flex_proportions.c void fprop_local_destroy_single(struct fprop_local_single *pl) pl 107 lib/flex_proportions.c struct fprop_local_single *pl) pl 113 lib/flex_proportions.c if (pl->period == period) pl 115 lib/flex_proportions.c raw_spin_lock_irqsave(&pl->lock, flags); pl 117 lib/flex_proportions.c if (pl->period >= period) { 
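The lib/flex_proportions.c entries just listed quote fprop_reflect_period_single(), whose core is an exponential-decay step: when the global period has advanced past the local one, the local event count is halved once per missed period via a right shift, and zeroed outright once BITS_PER_LONG periods have passed. A minimal portable sketch of that step, with age_events() as an illustrative name rather than a kernel function:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Stand-alone sketch of the decay step quoted from
 * fprop_reflect_period_single(): "pl->events >>= period - pl->period"
 * halves the local event count once per missed period, and anything
 * older than BITS_PER_LONG periods is simply zeroed. */
static unsigned long age_events(unsigned long events,
                                unsigned int missed_periods)
{
    if (missed_periods == 0)
        return events;               /* pl->period == period: up to date */
    if (missed_periods < (unsigned int)BITS_PER_LONG)
        return events >> missed_periods;
    return 0;                        /* fully decayed */
}

int main(void)
{
    printf("%lu\n", age_events(1024UL, 3));   /* 1024 / 2^3 = 128 */
    printf("%lu\n", age_events(1024UL, 99));  /* 0: shift would overflow */
    return 0;
}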
pl 118 lib/flex_proportions.c raw_spin_unlock_irqrestore(&pl->lock, flags); pl 122 lib/flex_proportions.c if (period - pl->period < BITS_PER_LONG) pl 123 lib/flex_proportions.c pl->events >>= period - pl->period; pl 125 lib/flex_proportions.c pl->events = 0; pl 126 lib/flex_proportions.c pl->period = period; pl 127 lib/flex_proportions.c raw_spin_unlock_irqrestore(&pl->lock, flags); pl 131 lib/flex_proportions.c void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl) pl 133 lib/flex_proportions.c fprop_reflect_period_single(p, pl); pl 134 lib/flex_proportions.c pl->events++; pl 140 lib/flex_proportions.c struct fprop_local_single *pl, pl 148 lib/flex_proportions.c fprop_reflect_period_single(p, pl); pl 149 lib/flex_proportions.c num = pl->events; pl 172 lib/flex_proportions.c int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp) pl 176 lib/flex_proportions.c err = percpu_counter_init(&pl->events, 0, gfp); pl 179 lib/flex_proportions.c pl->period = 0; pl 180 lib/flex_proportions.c raw_spin_lock_init(&pl->lock); pl 184 lib/flex_proportions.c void fprop_local_destroy_percpu(struct fprop_local_percpu *pl) pl 186 lib/flex_proportions.c percpu_counter_destroy(&pl->events); pl 190 lib/flex_proportions.c struct fprop_local_percpu *pl) pl 196 lib/flex_proportions.c if (pl->period == period) pl 198 lib/flex_proportions.c raw_spin_lock_irqsave(&pl->lock, flags); pl 200 lib/flex_proportions.c if (pl->period >= period) { pl 201 lib/flex_proportions.c raw_spin_unlock_irqrestore(&pl->lock, flags); pl 205 lib/flex_proportions.c if (period - pl->period < BITS_PER_LONG) { pl 206 lib/flex_proportions.c s64 val = percpu_counter_read(&pl->events); pl 209 lib/flex_proportions.c val = percpu_counter_sum(&pl->events); pl 211 lib/flex_proportions.c percpu_counter_add_batch(&pl->events, pl 212 lib/flex_proportions.c -val + (val >> (period-pl->period)), PROP_BATCH); pl 214 lib/flex_proportions.c percpu_counter_set(&pl->events, 0); pl 215 lib/flex_proportions.c pl->period = period; pl 216 lib/flex_proportions.c raw_spin_unlock_irqrestore(&pl->lock, flags); pl 220 lib/flex_proportions.c void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl) pl 222 lib/flex_proportions.c fprop_reflect_period_percpu(p, pl); pl 223 lib/flex_proportions.c percpu_counter_add_batch(&pl->events, 1, PROP_BATCH); pl 228 lib/flex_proportions.c struct fprop_local_percpu *pl, pl 236 lib/flex_proportions.c fprop_reflect_period_percpu(p, pl); pl 237 lib/flex_proportions.c num = percpu_counter_read_positive(&pl->events); pl 260 lib/flex_proportions.c struct fprop_local_percpu *pl, int max_frac) pl 265 lib/flex_proportions.c fprop_fraction_percpu(p, pl, &numerator, &denominator); pl 270 lib/flex_proportions.c fprop_reflect_period_percpu(p, pl); pl 271 lib/flex_proportions.c percpu_counter_add_batch(&pl->events, 1, PROP_BATCH); pl 154 lib/mpi/longlong.h #define umul_ppmm(ph, pl, m0, m1) \ pl 158 lib/mpi/longlong.h (pl) = __m0 * __m1; \ pl 281 lib/mpi/longlong.h #define umul_ppmm(ph, pl, m0, m1) \ pl 284 lib/mpi/longlong.h "=r" ((USItype)(pl)) \ pl 786 lib/mpi/longlong.h #define umul_ppmm(ph, pl, m0, m1) \ pl 793 lib/mpi/longlong.h (pl) = __m0 * __m1; \ pl 796 lib/mpi/longlong.h #define smul_ppmm(ph, pl, m0, m1) \ pl 803 lib/mpi/longlong.h (pl) = __m0 * __m1; \ pl 892 lib/mpi/longlong.h #define umul_ppmm(ph, pl, m0, m1) \ pl 917 lib/mpi/longlong.h "=r" ((USItype)(pl)) \ pl 1232 lib/mpi/longlong.h #define umul_ppmm(ph, pl, m0, m1) \ pl 1236 lib/mpi/longlong.h pl = (UWtype) __ll; \ pl 4714 
net/ceph/osd_client.c struct ceph_pagelist *pl; pl 4719 net/ceph/osd_client.c pl = ceph_pagelist_alloc(GFP_NOIO); pl 4720 net/ceph/osd_client.c if (!pl) pl 4723 net/ceph/osd_client.c ret = ceph_pagelist_encode_64(pl, notify_id); pl 4724 net/ceph/osd_client.c ret |= ceph_pagelist_encode_64(pl, cookie); pl 4726 net/ceph/osd_client.c ret |= ceph_pagelist_encode_32(pl, payload_len); pl 4727 net/ceph/osd_client.c ret |= ceph_pagelist_append(pl, payload, payload_len); pl 4729 net/ceph/osd_client.c ret |= ceph_pagelist_encode_32(pl, 0); pl 4732 net/ceph/osd_client.c ceph_pagelist_release(pl); pl 4736 net/ceph/osd_client.c ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl); pl 4737 net/ceph/osd_client.c op->indata_len = pl->length; pl 4783 net/ceph/osd_client.c struct ceph_pagelist *pl; pl 4789 net/ceph/osd_client.c pl = ceph_pagelist_alloc(GFP_NOIO); pl 4790 net/ceph/osd_client.c if (!pl) pl 4793 net/ceph/osd_client.c ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */ pl 4794 net/ceph/osd_client.c ret |= ceph_pagelist_encode_32(pl, timeout); pl 4795 net/ceph/osd_client.c ret |= ceph_pagelist_encode_32(pl, payload_len); pl 4796 net/ceph/osd_client.c ret |= ceph_pagelist_append(pl, payload, payload_len); pl 4798 net/ceph/osd_client.c ceph_pagelist_release(pl); pl 4802 net/ceph/osd_client.c ceph_osd_data_pagelist_init(&op->notify.request_data, pl); pl 4803 net/ceph/osd_client.c op->indata_len = pl->length; pl 11 net/ceph/pagelist.c struct ceph_pagelist *pl; pl 13 net/ceph/pagelist.c pl = kmalloc(sizeof(*pl), gfp_flags); pl 14 net/ceph/pagelist.c if (!pl) pl 17 net/ceph/pagelist.c INIT_LIST_HEAD(&pl->head); pl 18 net/ceph/pagelist.c pl->mapped_tail = NULL; pl 19 net/ceph/pagelist.c pl->length = 0; pl 20 net/ceph/pagelist.c pl->room = 0; pl 21 net/ceph/pagelist.c INIT_LIST_HEAD(&pl->free_list); pl 22 net/ceph/pagelist.c pl->num_pages_free = 0; pl 23 net/ceph/pagelist.c refcount_set(&pl->refcnt, 1); pl 25 net/ceph/pagelist.c return pl; pl 29 net/ceph/pagelist.c static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl) pl 31 net/ceph/pagelist.c if (pl->mapped_tail) { pl 32 net/ceph/pagelist.c struct page *page = list_entry(pl->head.prev, struct page, lru); pl 34 net/ceph/pagelist.c pl->mapped_tail = NULL; pl 38 net/ceph/pagelist.c void ceph_pagelist_release(struct ceph_pagelist *pl) pl 40 net/ceph/pagelist.c if (!refcount_dec_and_test(&pl->refcnt)) pl 42 net/ceph/pagelist.c ceph_pagelist_unmap_tail(pl); pl 43 net/ceph/pagelist.c while (!list_empty(&pl->head)) { pl 44 net/ceph/pagelist.c struct page *page = list_first_entry(&pl->head, struct page, pl 49 net/ceph/pagelist.c ceph_pagelist_free_reserve(pl); pl 50 net/ceph/pagelist.c kfree(pl); pl 54 net/ceph/pagelist.c static int ceph_pagelist_addpage(struct ceph_pagelist *pl) pl 58 net/ceph/pagelist.c if (!pl->num_pages_free) { pl 61 net/ceph/pagelist.c page = list_first_entry(&pl->free_list, struct page, lru); pl 63 net/ceph/pagelist.c --pl->num_pages_free; pl 67 net/ceph/pagelist.c pl->room += PAGE_SIZE; pl 68 net/ceph/pagelist.c ceph_pagelist_unmap_tail(pl); pl 69 net/ceph/pagelist.c list_add_tail(&page->lru, &pl->head); pl 70 net/ceph/pagelist.c pl->mapped_tail = kmap(page); pl 74 net/ceph/pagelist.c int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len) pl 76 net/ceph/pagelist.c while (pl->room < len) { pl 77 net/ceph/pagelist.c size_t bit = pl->room; pl 80 net/ceph/pagelist.c memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), pl 82 net/ceph/pagelist.c pl->length += bit; pl 83 net/ceph/pagelist.c pl->room -= 
bit; pl 86 net/ceph/pagelist.c ret = ceph_pagelist_addpage(pl); pl 91 net/ceph/pagelist.c memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len); pl 92 net/ceph/pagelist.c pl->length += len; pl 93 net/ceph/pagelist.c pl->room -= len; pl 102 net/ceph/pagelist.c int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space) pl 104 net/ceph/pagelist.c if (space <= pl->room) pl 106 net/ceph/pagelist.c space -= pl->room; pl 109 net/ceph/pagelist.c while (space > pl->num_pages_free) { pl 113 net/ceph/pagelist.c list_add_tail(&page->lru, &pl->free_list); pl 114 net/ceph/pagelist.c ++pl->num_pages_free; pl 121 net/ceph/pagelist.c int ceph_pagelist_free_reserve(struct ceph_pagelist *pl) pl 123 net/ceph/pagelist.c while (!list_empty(&pl->free_list)) { pl 124 net/ceph/pagelist.c struct page *page = list_first_entry(&pl->free_list, pl 128 net/ceph/pagelist.c --pl->num_pages_free; pl 130 net/ceph/pagelist.c BUG_ON(pl->num_pages_free); pl 136 net/ceph/pagelist.c void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, pl 139 net/ceph/pagelist.c c->pl = pl; pl 140 net/ceph/pagelist.c c->page_lru = pl->head.prev; pl 141 net/ceph/pagelist.c c->room = pl->room; pl 150 net/ceph/pagelist.c int ceph_pagelist_truncate(struct ceph_pagelist *pl, pl 155 net/ceph/pagelist.c if (pl != c->pl) pl 157 net/ceph/pagelist.c ceph_pagelist_unmap_tail(pl); pl 158 net/ceph/pagelist.c while (pl->head.prev != c->page_lru) { pl 159 net/ceph/pagelist.c page = list_entry(pl->head.prev, struct page, lru); pl 161 net/ceph/pagelist.c list_move_tail(&page->lru, &pl->free_list); pl 162 net/ceph/pagelist.c ++pl->num_pages_free; pl 164 net/ceph/pagelist.c pl->room = c->room; pl 165 net/ceph/pagelist.c if (!list_empty(&pl->head)) { pl 166 net/ceph/pagelist.c page = list_entry(pl->head.prev, struct page, lru); pl 167 net/ceph/pagelist.c pl->mapped_tail = kmap(page); pl 81 net/dsa/port.c if (dp->pl) pl 82 net/dsa/port.c phylink_start(dp->pl); pl 103 net/dsa/port.c if (dp->pl) pl 104 net/dsa/port.c phylink_stop(dp->pl); pl 629 net/dsa/port.c dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), pl 631 net/dsa/port.c if (IS_ERR(dp->pl)) { pl 632 net/dsa/port.c pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl)); pl 633 net/dsa/port.c return PTR_ERR(dp->pl); pl 636 net/dsa/port.c err = phylink_of_phy_connect(dp->pl, port_dn, 0); pl 645 net/dsa/port.c phylink_destroy(dp->pl); pl 674 net/dsa/port.c if (!ds->ops->adjust_link && dp->pl) { pl 676 net/dsa/port.c phylink_disconnect_phy(dp->pl); pl 678 net/dsa/port.c phylink_destroy(dp->pl); pl 679 net/dsa/port.c dp->pl = NULL; pl 272 net/dsa/slave.c return phylink_mii_ioctl(p->dp->pl, ifr, cmd); pl 598 net/dsa/slave.c return phylink_ethtool_nway_reset(dp->pl); pl 712 net/dsa/slave.c phylink_ethtool_get_wol(dp->pl, w); pl 724 net/dsa/slave.c phylink_ethtool_set_wol(dp->pl, w); pl 739 net/dsa/slave.c if (!dev->phydev || !dp->pl) pl 749 net/dsa/slave.c return phylink_ethtool_set_eee(dp->pl, e); pl 759 net/dsa/slave.c if (!dev->phydev || !dp->pl) pl 769 net/dsa/slave.c return phylink_ethtool_get_eee(dp->pl, e); pl 777 net/dsa/slave.c return phylink_ethtool_ksettings_get(dp->pl, cmd); pl 785 net/dsa/slave.c return phylink_ethtool_ksettings_set(dp->pl, cmd); pl 1258 net/dsa/slave.c phylink_mac_change(dp->pl, up); pl 1286 net/dsa/slave.c return phylink_connect_phy(dp->pl, slave_dev->phydev); pl 1304 net/dsa/slave.c dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode, pl 1306 net/dsa/slave.c if (IS_ERR(dp->pl)) { pl 1308 net/dsa/slave.c "error creating PHYLINK: 
%ld\n", PTR_ERR(dp->pl)); pl 1309 net/dsa/slave.c return PTR_ERR(dp->pl); pl 1317 net/dsa/slave.c phylink_fixed_state_cb(dp->pl, dsa_slave_phylink_fixed_state); pl 1322 net/dsa/slave.c ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags); pl 1332 net/dsa/slave.c phylink_destroy(dp->pl); pl 1353 net/dsa/slave.c phylink_stop(dp->pl); pl 1369 net/dsa/slave.c phylink_start(dp->pl); pl 1462 net/dsa/slave.c phylink_disconnect_phy(p->dp->pl); pl 1464 net/dsa/slave.c phylink_destroy(p->dp->pl); pl 1479 net/dsa/slave.c phylink_disconnect_phy(dp->pl); pl 1484 net/dsa/slave.c phylink_destroy(dp->pl); pl 451 net/sched/act_csum.c unsigned int *pl) pl 469 net/sched/act_csum.c *pl = ntohl(*(__be32 *)(xh + off + 2)); pl 491 net/sched/act_csum.c unsigned int pl; pl 504 net/sched/act_csum.c pl = ntohs(ip6h->payload_len); pl 522 net/sched/act_csum.c !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl))) pl 530 net/sched/act_csum.c hl, pl + sizeof(*ip6h))) pl 536 net/sched/act_csum.c hl, pl + sizeof(*ip6h))) pl 542 net/sched/act_csum.c pl + sizeof(*ip6h), 0)) pl 548 net/sched/act_csum.c pl + sizeof(*ip6h), 1)) pl 553 net/sched/act_csum.c !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h))) pl 1657 net/tipc/node.c struct tipc_link *l, *tnl, *pl = NULL; pl 1677 net/tipc/node.c pl = n->links[pb_id].link; pl 1714 net/tipc/node.c if (pl && !tipc_link_is_reset(pl)) { pl 1718 net/tipc/node.c tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl), pl 1729 net/tipc/node.c tipc_node_link_failover(n, pl, l, xmitq); pl 1741 net/tipc/node.c if (pl) pl 1742 net/tipc/node.c tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT); pl 1747 net/tipc/node.c if (!pl || !tipc_link_is_up(pl)) pl 1770 net/tipc/node.c tnl = pl; pl 1771 net/tipc/node.c pl = l; pl 1773 net/tipc/node.c inputq_len = skb_queue_len(tipc_link_inputq(pl)); pl 1774 net/tipc/node.c dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len; pl 1780 net/tipc/node.c if (l == pl) pl 848 sound/soc/qcom/qdsp6/q6afe.c void *p, *pl; pl 858 sound/soc/qcom/qdsp6/q6afe.c pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata); pl 859 sound/soc/qcom/qdsp6/q6afe.c memcpy(pl, data, psize); pl 896 sound/soc/qcom/qdsp6/q6afe.c void *p, *pl; pl 906 sound/soc/qcom/qdsp6/q6afe.c pl = p + APR_HDR_SIZE + sizeof(*param) + sizeof(*pdata); pl 907 sound/soc/qcom/qdsp6/q6afe.c memcpy(pl, data, psize);