pmu               449 arch/alpha/kernel/perf_event.c 	perf_pmu_disable(event->pmu);
pmu               474 arch/alpha/kernel/perf_event.c 	perf_pmu_enable(event->pmu);
pmu               492 arch/alpha/kernel/perf_event.c 	perf_pmu_disable(event->pmu);
pmu               520 arch/alpha/kernel/perf_event.c 	perf_pmu_enable(event->pmu);
pmu               720 arch/alpha/kernel/perf_event.c static void alpha_pmu_enable(struct pmu *pmu)
pmu               746 arch/alpha/kernel/perf_event.c static void alpha_pmu_disable(struct pmu *pmu)
pmu               759 arch/alpha/kernel/perf_event.c static struct pmu pmu = {
pmu               895 arch/alpha/kernel/perf_event.c 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
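The alpha entries above show the canonical core-PMU shape: event add/del paths bracket counter reprogramming with perf_pmu_disable()/perf_pmu_enable() on event->pmu, a file-scope struct pmu gathers the callbacks, and init registers it as the "cpu" PMU under PERF_TYPE_RAW. A minimal sketch of that shape, with hypothetical my_pmu_* names rather than the alpha code:

#include <linux/init.h>
#include <linux/perf_event.h>

static void my_pmu_enable(struct pmu *pmu)  { /* start all counters */ }
static void my_pmu_disable(struct pmu *pmu) { /* stop all counters */ }

static int my_pmu_add(struct perf_event *event, int flags)
{
	perf_pmu_disable(event->pmu);	/* quiesce while reprogramming */
	/* ... claim a hardware counter, program event->hw ... */
	perf_pmu_enable(event->pmu);
	return 0;
}

static struct pmu pmu = {
	.pmu_enable	= my_pmu_enable,
	.pmu_disable	= my_pmu_disable,
	.add		= my_pmu_add,
	/* .del, .start, .stop, .read and .event_init are also needed */
};

static int __init my_pmu_init(void)
{
	return perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
}
early_initcall(my_pmu_init);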
pmu                31 arch/arc/kernel/perf_event.c 	struct pmu	pmu;
pmu               228 arch/arc/kernel/perf_event.c static void arc_pmu_enable(struct pmu *pmu)
pmu               236 arch/arc/kernel/perf_event.c static void arc_pmu_disable(struct pmu *pmu)
pmu               408 arch/arc/kernel/perf_event.c 	arc_pmu_disable(&arc_pmu->pmu);
pmu               449 arch/arc/kernel/perf_event.c 	arc_pmu_enable(&arc_pmu->pmu);
pmu               628 arch/arc/kernel/perf_event.c 	arc_pmu->pmu = (struct pmu) {
pmu               657 arch/arc/kernel/perf_event.c 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu               663 arch/arc/kernel/perf_event.c 	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
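arc_pct shows the driver-object variant: a struct pmu embedded in a private structure, recovered in callbacks via container_of() on event->pmu, with PERF_PMU_CAP_NO_INTERRUPT set when no overflow interrupt is wired up. A sketch of that pattern (my_pmu is a placeholder, not the ARC code):

#include <linux/perf_event.h>

struct my_pmu {
	struct pmu	pmu;
	void __iomem	*base;		/* driver-private counter MMIO */
};

#define to_my_pmu(p)	container_of(p, struct my_pmu, pmu)

static void my_pmu_read(struct perf_event *event)
{
	struct my_pmu *mp = to_my_pmu(event->pmu);

	/* ... read counters at mp->base and update event->count ... */
}

static int my_pmu_register(struct my_pmu *mp)
{
	mp->pmu = (struct pmu) {
		.read	= my_pmu_read,
		/* .event_init, .add, .del, .start, .stop ... */
	};

	/* no overflow IRQ: tell perf that sampling is unsupported */
	mp->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	return perf_pmu_register(&mp->pmu, "my_pct", PERF_TYPE_RAW);
}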
pmu               117 arch/arm/include/asm/hw_breakpoint.h struct pmu;
pmu               272 arch/arm/kernel/perf_event_v6.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               423 arch/arm/kernel/perf_event_v6.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               458 arch/arm/kernel/perf_event_v6.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               748 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               768 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               875 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               921 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu              1036 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu              1498 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu              1524 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu              1585 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu              1831 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu              1857 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu              1918 arch/arm/kernel/perf_event_v7.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               207 arch/arm/kernel/perf_event_xscale.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               243 arch/arm/kernel/perf_event_xscale.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               553 arch/arm/kernel/perf_event_xscale.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               599 arch/arm/kernel/perf_event_xscale.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu                59 arch/arm/mach-imx/mmdc.c #define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)
pmu                97 arch/arm/mach-imx/mmdc.c 	struct pmu pmu;
pmu               226 arch/arm/mach-imx/mmdc.c 	perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
pmu               233 arch/arm/mach-imx/mmdc.c 					  struct pmu *pmu,
pmu               241 arch/arm/mach-imx/mmdc.c 	if (event->pmu != pmu)
pmu               255 arch/arm/mach-imx/mmdc.c 	struct pmu *pmu = event->pmu;
pmu               263 arch/arm/mach-imx/mmdc.c 		if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
pmu               268 arch/arm/mach-imx/mmdc.c 		if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
pmu               277 arch/arm/mach-imx/mmdc.c 	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
pmu               280 arch/arm/mach-imx/mmdc.c 	if (event->attr.type != event->pmu->type)
pmu               306 arch/arm/mach-imx/mmdc.c 	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
pmu               324 arch/arm/mach-imx/mmdc.c 	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
pmu               360 arch/arm/mach-imx/mmdc.c 	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
pmu               381 arch/arm/mach-imx/mmdc.c 	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
pmu               397 arch/arm/mach-imx/mmdc.c 	struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
pmu               438 arch/arm/mach-imx/mmdc.c 		.pmu = (struct pmu) {
pmu               464 arch/arm/mach-imx/mmdc.c 	perf_pmu_unregister(&pmu_mmdc->pmu);
pmu               514 arch/arm/mach-imx/mmdc.c 	ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
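The perf_pmu_migrate_context() call above is the uncore hotplug idiom: events of a system-wide PMU live on one nominated CPU, and when that CPU goes offline they are re-homed wholesale onto a survivor. Sketch of the offline callback, under hypothetical my_pmu names:

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/perf_event.h>

struct my_pmu {
	struct pmu		pmu;
	cpumask_t		cpumask;	/* CPU nominated to count */
	struct hlist_node	node;		/* cpuhp instance linkage */
};

static int my_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct my_pmu *mp = hlist_entry_safe(node, struct my_pmu, node);
	int target;

	if (!cpumask_test_and_clear_cpu(cpu, &mp->cpumask))
		return 0;			/* not the active CPU */

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;			/* last CPU going down */

	/* move every event of this PMU over to the new reader CPU */
	perf_pmu_migrate_context(&mp->pmu, cpu, target);
	cpumask_set_cpu(target, &mp->cpumask);
	return 0;
}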
pmu                36 arch/arm/mach-meson/platsmp.c static struct regmap *pmu;
pmu                80 arch/arm/mach-meson/platsmp.c 	pmu = syscon_regmap_lookup_by_compatible(pmu_compatible);
pmu                81 arch/arm/mach-meson/platsmp.c 	if (IS_ERR(pmu)) {
pmu               175 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1,
pmu               185 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
pmu               225 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0,
pmu               242 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_MEM_PD0,
pmu               250 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1,
pmu               259 arch/arm/mach-meson/platsmp.c 	ret = regmap_read_poll_timeout(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1, val,
pmu               268 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
pmu               332 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
pmu               342 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1,
pmu               374 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0,
pmu               382 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL0, BIT(cpu),
pmu               392 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_CNTL1,
pmu               400 arch/arm/mach-meson/platsmp.c 	ret = regmap_update_bits(pmu, MESON_CPU_AO_RTI_PWR_A9_MEM_PD0,
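A naming caution for this file and rockchip/platsmp.c below: here "pmu" is the SoC power-management unit, a syscon register block driven through regmap, not a perf monitoring unit. The lookup-then-update idiom those lines follow, with a made-up compatible string and register offset:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define MY_PWR_CNTL0	0x00		/* hypothetical register offset */

static struct regmap *pmu;

static int my_smp_prepare(void)
{
	pmu = syscon_regmap_lookup_by_compatible("vendor,soc-pmu");
	return IS_ERR(pmu) ? PTR_ERR(pmu) : 0;
}

static int my_cpu_power_on(unsigned int cpu)
{
	/* clear this core's power-down request bit */
	return regmap_update_bits(pmu, MY_PWR_CNTL0, BIT(cpu), 0);
}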
pmu                35 arch/arm/mach-rockchip/platsmp.c static struct regmap *pmu;
pmu                43 arch/arm/mach-rockchip/platsmp.c 	ret = regmap_read(pmu, PMU_PWRDN_ST, &val);
pmu                85 arch/arm/mach-rockchip/platsmp.c 		ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
pmu               120 arch/arm/mach-rockchip/platsmp.c 	if (!sram_base_addr || (has_pmu && !pmu)) {
pmu               221 arch/arm/mach-rockchip/platsmp.c 	pmu = syscon_regmap_lookup_by_phandle(node, "rockchip,pmu");
pmu               223 arch/arm/mach-rockchip/platsmp.c 	if (!IS_ERR(pmu))
pmu               226 arch/arm/mach-rockchip/platsmp.c 	pmu = syscon_regmap_lookup_by_compatible("rockchip,rk3066-pmu");
pmu               227 arch/arm/mach-rockchip/platsmp.c 	if (!IS_ERR(pmu))
pmu               231 arch/arm/mach-rockchip/platsmp.c 	pmu = NULL;
pmu               245 arch/arm/mach-rockchip/platsmp.c 	pmu = regmap_init_mmio(NULL, pmu_base, &rockchip_pmu_regmap_config);
pmu               246 arch/arm/mach-rockchip/platsmp.c 	if (IS_ERR(pmu)) {
pmu               247 arch/arm/mach-rockchip/platsmp.c 		int ret = PTR_ERR(pmu);
pmu               250 arch/arm/mach-rockchip/platsmp.c 		pmu = NULL;
pmu                21 arch/arm/mm/cache-l2x0-pmu.c static struct pmu *l2x0_pmu;
pmu                93 arch/arm/mm/cache-l2x0-pmu.c static void l2x0_pmu_enable(struct pmu *pmu)
pmu               101 arch/arm/mm/cache-l2x0-pmu.c static void l2x0_pmu_disable(struct pmu *pmu)
pmu               274 arch/arm/mm/cache-l2x0-pmu.c 	struct pmu *pmu = event->pmu;
pmu               279 arch/arm/mm/cache-l2x0-pmu.c 	if (leader->pmu == pmu)
pmu               285 arch/arm/mm/cache-l2x0-pmu.c 		if (sibling->pmu == pmu)
pmu               354 arch/arm/mm/cache-l2x0-pmu.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu               359 arch/arm/mm/cache-l2x0-pmu.c 	if (!lattr->pl310_only || strcmp("l2c_310", pmu->name) == 0)
pmu               516 arch/arm/mm/cache-l2x0-pmu.c 	*l2x0_pmu = (struct pmu) {
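The leader/sibling checks above are group validation: at event_init time the driver counts how many group members target this PMU, allows software events to mix in, and rejects groups that cannot fit on the counters at once. The l2x0 logic generalized (MY_NUM_COUNTERS is a made-up capacity):

#include <linux/perf_event.h>

#define MY_NUM_COUNTERS	2		/* hypothetical counter count */

static bool my_pmu_group_is_valid(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;
	struct perf_event *leader = event->group_leader;
	struct perf_event *sibling;
	int num_hw = 0;

	if (leader->pmu == pmu)
		num_hw++;
	else if (!is_software_event(leader))
		return false;		/* foreign hardware PMU in group */

	for_each_sibling_event(sibling, leader) {
		if (sibling->pmu == pmu)
			num_hw++;
		else if (!is_software_event(sibling))
			return false;
	}

	return num_hw <= MY_NUM_COUNTERS;
}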
pmu               111 arch/arm64/include/asm/hw_breakpoint.h struct pmu;
pmu               295 arch/arm64/include/asm/kvm_host.h 	struct kvm_pmu pmu;
pmu               296 arch/arm64/kernel/perf_event.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu               297 arch/arm64/kernel/perf_event.c 	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
pmu               426 arch/arm64/kernel/perf_event.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               463 arch/arm64/kernel/perf_event.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               611 arch/arm64/kernel/perf_event.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               646 arch/arm64/kernel/perf_event.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               792 arch/arm64/kernel/perf_event.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
pmu               905 arch/arm64/kernel/perf_event.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               957 arch/arm64/kernel/perf_event.c 	struct arm_pmu *pmu;
pmu               964 arch/arm64/kernel/perf_event.c 	struct arm_pmu *cpu_pmu = probe->pmu;
pmu              1001 arch/arm64/kernel/perf_event.c 		.pmu = cpu_pmu,
pmu               564 arch/arm64/kvm/hyp/switch.c 	struct kvm_pmu_events *pmu;
pmu               567 arch/arm64/kvm/hyp/switch.c 	pmu = &host->pmu_events;
pmu               569 arch/arm64/kvm/hyp/switch.c 	if (pmu->events_host)
pmu               570 arch/arm64/kvm/hyp/switch.c 		write_sysreg(pmu->events_host, pmcntenclr_el0);
pmu               572 arch/arm64/kvm/hyp/switch.c 	if (pmu->events_guest)
pmu               573 arch/arm64/kvm/hyp/switch.c 		write_sysreg(pmu->events_guest, pmcntenset_el0);
pmu               575 arch/arm64/kvm/hyp/switch.c 	return (pmu->events_host || pmu->events_guest);
pmu               584 arch/arm64/kvm/hyp/switch.c 	struct kvm_pmu_events *pmu;
pmu               587 arch/arm64/kvm/hyp/switch.c 	pmu = &host->pmu_events;
pmu               589 arch/arm64/kvm/hyp/switch.c 	if (pmu->events_guest)
pmu               590 arch/arm64/kvm/hyp/switch.c 		write_sysreg(pmu->events_guest, pmcntenclr_el0);
pmu               592 arch/arm64/kvm/hyp/switch.c 	if (pmu->events_host)
pmu               593 arch/arm64/kvm/hyp/switch.c 		write_sysreg(pmu->events_host, pmcntenset_el0);
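These switch.c lines choose which counters tick while a guest runs: counters the host alone wants are masked via PMCNTENCLR_EL0, guest-only counters are turned on via PMCNTENSET_EL0, and the second function mirrors it on exit. The entry half, condensed (assuming the host-data structure carries kvm_pmu_events, as the lines above imply):

static bool my_pmu_switch_to_guest(struct kvm_host_data *host)
{
	struct kvm_pmu_events *pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	/* caller must isb() before entering the guest if this is true */
	return pmu->events_host || pmu->events_guest;
}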
pmu                40 arch/csky/kernel/perf_event.c 	struct pmu			pmu;
pmu                49 arch/csky/kernel/perf_event.c #define to_csky_pmu(p)  (container_of(p, struct csky_pmu, pmu))
pmu              1010 arch/csky/kernel/perf_event.c static void csky_pmu_enable(struct pmu *pmu)
pmu              1016 arch/csky/kernel/perf_event.c static void csky_pmu_disable(struct pmu *pmu)
pmu              1121 arch/csky/kernel/perf_event.c 	csky_pmu_disable(&csky_pmu.pmu);
pmu              1146 arch/csky/kernel/perf_event.c 	csky_pmu_enable(&csky_pmu.pmu);
pmu              1207 arch/csky/kernel/perf_event.c 	csky_pmu.pmu = (struct pmu) {
pmu              1318 arch/csky/kernel/perf_event.c 		csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu              1331 arch/csky/kernel/perf_event.c 	ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW);
pmu               463 arch/mips/kernel/perf_event_mipsxx.c 	perf_pmu_disable(event->pmu);
pmu               488 arch/mips/kernel/perf_event_mipsxx.c 	perf_pmu_enable(event->pmu);
pmu               518 arch/mips/kernel/perf_event_mipsxx.c static void mipspmu_enable(struct pmu *pmu)
pmu               537 arch/mips/kernel/perf_event_mipsxx.c static void mipspmu_disable(struct pmu *pmu)
pmu               646 arch/mips/kernel/perf_event_mipsxx.c static struct pmu pmu = {
pmu              1827 arch/mips/kernel/perf_event_mipsxx.c 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
pmu                64 arch/nds32/include/asm/pmu.h 	struct pmu pmu;
pmu                89 arch/nds32/include/asm/pmu.h #define to_nds32_pmu(p)			(container_of(p, struct nds32_pmu, pmu))
pmu               185 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu               417 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
pmu               468 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
pmu               497 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
pmu               526 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *cpu_pmu = to_nds32_pmu(event->pmu);
pmu               678 arch/nds32/kernel/perf_event_cpu.c static int probe_current_pmu(struct nds32_pmu *pmu)
pmu               689 arch/nds32/kernel/perf_event_cpu.c 	device_pmu_init(pmu);
pmu               694 arch/nds32/kernel/perf_event_cpu.c static void nds32_pmu_enable(struct pmu *pmu)
pmu               696 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(pmu);
pmu               705 arch/nds32/kernel/perf_event_cpu.c static void nds32_pmu_disable(struct pmu *pmu)
pmu               707 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(pmu);
pmu               751 arch/nds32/kernel/perf_event_cpu.c validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
pmu               754 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu               759 arch/nds32/kernel/perf_event_cpu.c 	if (event->pmu != pmu)
pmu               782 arch/nds32/kernel/perf_event_cpu.c 	if (!validate_event(event->pmu, &fake_pmu, leader))
pmu               786 arch/nds32/kernel/perf_event_cpu.c 		if (!validate_event(event->pmu, &fake_pmu, sibling))
pmu               790 arch/nds32/kernel/perf_event_cpu.c 	if (!validate_event(event->pmu, &fake_pmu, event))
pmu               798 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu               859 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu               890 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu               908 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu               914 arch/nds32/kernel/perf_event_cpu.c 	perf_pmu_disable(event->pmu);
pmu               939 arch/nds32/kernel/perf_event_cpu.c 	perf_pmu_enable(event->pmu);
pmu               945 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu               971 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu               986 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *nds32_pmu = to_nds32_pmu(event->pmu);
pmu              1044 arch/nds32/kernel/perf_event_cpu.c 	nds32_pmu->pmu = (struct pmu) {
pmu              1063 arch/nds32/kernel/perf_event_cpu.c 	return perf_pmu_register(&nds32_pmu->pmu, nds32_pmu->name, type);
pmu              1133 arch/nds32/kernel/perf_event_cpu.c 	struct nds32_pmu *pmu;
pmu              1141 arch/nds32/kernel/perf_event_cpu.c 	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
pmu              1142 arch/nds32/kernel/perf_event_cpu.c 	if (!pmu)
pmu              1148 arch/nds32/kernel/perf_event_cpu.c 		ret = init_fn(pmu);
pmu              1150 arch/nds32/kernel/perf_event_cpu.c 		ret = probe_current_pmu(pmu);
pmu              1158 arch/nds32/kernel/perf_event_cpu.c 	cpu_pmu = pmu;
pmu              1168 arch/nds32/kernel/perf_event_cpu.c 	kfree(pmu);
pmu                43 arch/powerpc/include/asm/hw_breakpoint.h struct pmu;
pmu                63 arch/powerpc/include/asm/hw_breakpoint.h extern struct pmu perf_ops_bp;
pmu               111 arch/powerpc/include/asm/imc-pmu.h 	struct pmu pmu;
pmu               192 arch/powerpc/perf/8xx-pmu.c static struct pmu mpc8xx_pmu = {
pmu               380 arch/powerpc/perf/core-book3s.c 	perf_sched_cb_inc(event->ctx->pmu);
pmu               392 arch/powerpc/perf/core-book3s.c 	perf_sched_cb_dec(event->ctx->pmu);
pmu              1201 arch/powerpc/perf/core-book3s.c static void power_pmu_disable(struct pmu *pmu)
pmu              1274 arch/powerpc/perf/core-book3s.c static void power_pmu_enable(struct pmu *pmu)
pmu              1443 arch/powerpc/perf/core-book3s.c 	if (group->pmu->task_ctx_nr == perf_hw_context) {
pmu              1451 arch/powerpc/perf/core-book3s.c 		if (event->pmu->task_ctx_nr == perf_hw_context &&
pmu              1477 arch/powerpc/perf/core-book3s.c 	perf_pmu_disable(event->pmu);
pmu              1530 arch/powerpc/perf/core-book3s.c 	perf_pmu_enable(event->pmu);
pmu              1545 arch/powerpc/perf/core-book3s.c 	perf_pmu_disable(event->pmu);
pmu              1585 arch/powerpc/perf/core-book3s.c 	perf_pmu_enable(event->pmu);
pmu              1610 arch/powerpc/perf/core-book3s.c 	perf_pmu_disable(event->pmu);
pmu              1622 arch/powerpc/perf/core-book3s.c 	perf_pmu_enable(event->pmu);
pmu              1637 arch/powerpc/perf/core-book3s.c 	perf_pmu_disable(event->pmu);
pmu              1644 arch/powerpc/perf/core-book3s.c 	perf_pmu_enable(event->pmu);
pmu              1657 arch/powerpc/perf/core-book3s.c static void power_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
pmu              1667 arch/powerpc/perf/core-book3s.c 	perf_pmu_disable(pmu);
pmu              1676 arch/powerpc/perf/core-book3s.c static void power_pmu_cancel_txn(struct pmu *pmu)
pmu              1688 arch/powerpc/perf/core-book3s.c 	perf_pmu_enable(pmu);
pmu              1696 arch/powerpc/perf/core-book3s.c static int power_pmu_commit_txn(struct pmu *pmu)
pmu              1723 arch/powerpc/perf/core-book3s.c 	perf_pmu_enable(pmu);
pmu              2017 arch/powerpc/perf/core-book3s.c static struct pmu power_pmu = {
pmu              2275 arch/powerpc/perf/core-book3s.c int register_power_pmu(struct power_pmu *pmu)
pmu              2280 arch/powerpc/perf/core-book3s.c 	ppmu = pmu;
pmu              2282 arch/powerpc/perf/core-book3s.c 		pmu->name);
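power_pmu_{start,commit,cancel}_txn above are the perf transaction hooks: start_txn disables the PMU and checkpoints per-CPU state so several add() calls can be batched, commit_txn validates the whole batch at once, and cancel_txn rewinds to the checkpoint. A skeleton with hypothetical per-CPU bookkeeping (my_check_constraints stands in for the real event scheduler):

#include <linux/percpu.h>
#include <linux/perf_event.h>

struct my_cpu_hw {
	unsigned int	txn_flags;
	int		n_events;
	int		n_txn_start;		/* rollback checkpoint */
};
static DEFINE_PER_CPU(struct my_cpu_hw, cpu_hw);

static int my_check_constraints(struct my_cpu_hw *cpuhw);	/* hypothetical */

static void my_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct my_cpu_hw *cpuhw = this_cpu_ptr(&cpu_hw);

	WARN_ON_ONCE(cpuhw->txn_flags);		/* transactions don't nest */
	cpuhw->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;				/* only ADD is batched */

	perf_pmu_disable(pmu);
	cpuhw->n_txn_start = cpuhw->n_events;
}

static void my_pmu_cancel_txn(struct pmu *pmu)
{
	struct my_cpu_hw *cpuhw = this_cpu_ptr(&cpu_hw);
	unsigned int txn_flags = cpuhw->txn_flags;

	cpuhw->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	cpuhw->n_events = cpuhw->n_txn_start;	/* drop batched events */
	perf_pmu_enable(pmu);
}

static int my_pmu_commit_txn(struct pmu *pmu)
{
	struct my_cpu_hw *cpuhw = this_cpu_ptr(&cpu_hw);

	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuhw->txn_flags = 0;
		return 0;
	}

	if (my_check_constraints(cpuhw))
		return -EAGAIN;			/* core then calls cancel_txn */

	cpuhw->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}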
pmu               203 arch/powerpc/perf/core-fsl-emb.c static void fsl_emb_pmu_disable(struct pmu *pmu)
pmu               242 arch/powerpc/perf/core-fsl-emb.c static void fsl_emb_pmu_enable(struct pmu *pmu)
pmu               297 arch/powerpc/perf/core-fsl-emb.c 	perf_pmu_disable(event->pmu);
pmu               345 arch/powerpc/perf/core-fsl-emb.c 	perf_pmu_enable(event->pmu);
pmu               355 arch/powerpc/perf/core-fsl-emb.c 	perf_pmu_disable(event->pmu);
pmu               383 arch/powerpc/perf/core-fsl-emb.c 	perf_pmu_enable(event->pmu);
pmu               403 arch/powerpc/perf/core-fsl-emb.c 	perf_pmu_disable(event->pmu);
pmu               413 arch/powerpc/perf/core-fsl-emb.c 	perf_pmu_enable(event->pmu);
pmu               428 arch/powerpc/perf/core-fsl-emb.c 	perf_pmu_disable(event->pmu);
pmu               435 arch/powerpc/perf/core-fsl-emb.c 	perf_pmu_enable(event->pmu);
pmu               586 arch/powerpc/perf/core-fsl-emb.c static struct pmu fsl_emb_pmu = {
pmu               707 arch/powerpc/perf/core-fsl-emb.c int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
pmu               712 arch/powerpc/perf/core-fsl-emb.c 	ppmu = pmu;
pmu               714 arch/powerpc/perf/core-fsl-emb.c 		pmu->name);
pmu              1288 arch/powerpc/perf/hv-24x7.c 	if (event->attr.type != event->pmu->type)
pmu              1447 arch/powerpc/perf/hv-24x7.c static void h_24x7_event_start_txn(struct pmu *pmu, unsigned int flags)
pmu              1489 arch/powerpc/perf/hv-24x7.c static int h_24x7_event_commit_txn(struct pmu *pmu)
pmu              1547 arch/powerpc/perf/hv-24x7.c static void h_24x7_event_cancel_txn(struct pmu *pmu)
pmu              1553 arch/powerpc/perf/hv-24x7.c static struct pmu h_24x7_pmu = {
pmu               222 arch/powerpc/perf/hv-gpci.c 	if (event->attr.type != event->pmu->type)
pmu               264 arch/powerpc/perf/hv-gpci.c static struct pmu h_gpci_pmu = {
pmu                49 arch/powerpc/perf/imc-pmu.c 	return container_of(event->pmu, struct imc_pmu, pmu);
pmu                93 arch/powerpc/perf/imc-pmu.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu                94 arch/powerpc/perf/imc-pmu.c 	struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
pmu               209 arch/powerpc/perf/imc-pmu.c static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
pmu               246 arch/powerpc/perf/imc-pmu.c 	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
pmu               247 arch/powerpc/perf/imc-pmu.c 	if (!pmu->events)
pmu               253 arch/powerpc/perf/imc-pmu.c 		ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
pmu               261 arch/powerpc/perf/imc-pmu.c 		imc_free_events(pmu->events, ct);
pmu               276 arch/powerpc/perf/imc-pmu.c 		imc_free_events(pmu->events, ct);
pmu               283 arch/powerpc/perf/imc-pmu.c 		ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
pmu               284 arch/powerpc/perf/imc-pmu.c 		dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
pmu               289 arch/powerpc/perf/imc-pmu.c 		if (pmu->events[i].scale) {
pmu               290 arch/powerpc/perf/imc-pmu.c 			ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
pmu               291 arch/powerpc/perf/imc-pmu.c 			dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
pmu               298 arch/powerpc/perf/imc-pmu.c 		if (pmu->events[i].unit) {
pmu               299 arch/powerpc/perf/imc-pmu.c 			ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
pmu               300 arch/powerpc/perf/imc-pmu.c 			dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
pmu               309 arch/powerpc/perf/imc-pmu.c 	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;
pmu               328 arch/powerpc/perf/imc-pmu.c 		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
pmu               492 arch/powerpc/perf/imc-pmu.c 	struct imc_pmu *pmu;
pmu               496 arch/powerpc/perf/imc-pmu.c 	if (event->attr.type != event->pmu->type)
pmu               506 arch/powerpc/perf/imc-pmu.c 	pmu = imc_event_to_pmu(event);
pmu               509 arch/powerpc/perf/imc-pmu.c 	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
pmu               522 arch/powerpc/perf/imc-pmu.c 	pcni = pmu->mem_info;
pmu               675 arch/powerpc/perf/imc-pmu.c 	if (!core_imc_pmu->pmu.event_init)
pmu               686 arch/powerpc/perf/imc-pmu.c 		perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
pmu               769 arch/powerpc/perf/imc-pmu.c 	struct imc_pmu *pmu;
pmu               772 arch/powerpc/perf/imc-pmu.c 	if (event->attr.type != event->pmu->type)
pmu               783 arch/powerpc/perf/imc-pmu.c 	pmu = imc_event_to_pmu(event);
pmu               786 arch/powerpc/perf/imc-pmu.c 	if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))
pmu               896 arch/powerpc/perf/imc-pmu.c 	struct imc_pmu *pmu;
pmu               898 arch/powerpc/perf/imc-pmu.c 	if (event->attr.type != event->pmu->type)
pmu               909 arch/powerpc/perf/imc-pmu.c 	pmu = imc_event_to_pmu(event);
pmu               912 arch/powerpc/perf/imc-pmu.c 	if (((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size))
pmu               919 arch/powerpc/perf/imc-pmu.c 	event->pmu->task_ctx_nr = perf_sw_context;
pmu               925 arch/powerpc/perf/imc-pmu.c 	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
pmu               943 arch/powerpc/perf/imc-pmu.c static void thread_imc_pmu_start_txn(struct pmu *pmu,
pmu               948 arch/powerpc/perf/imc-pmu.c 	perf_pmu_disable(pmu);
pmu               951 arch/powerpc/perf/imc-pmu.c static void thread_imc_pmu_cancel_txn(struct pmu *pmu)
pmu               953 arch/powerpc/perf/imc-pmu.c 	perf_pmu_enable(pmu);
pmu               956 arch/powerpc/perf/imc-pmu.c static int thread_imc_pmu_commit_txn(struct pmu *pmu)
pmu               958 arch/powerpc/perf/imc-pmu.c 	perf_pmu_enable(pmu);
pmu              1307 arch/powerpc/perf/imc-pmu.c 	if (event->attr.type != event->pmu->type)
pmu              1320 arch/powerpc/perf/imc-pmu.c 	event->pmu->task_ctx_nr = perf_hw_context;
pmu              1325 arch/powerpc/perf/imc-pmu.c static int update_pmu_ops(struct imc_pmu *pmu)
pmu              1327 arch/powerpc/perf/imc-pmu.c 	pmu->pmu.task_ctx_nr = perf_invalid_context;
pmu              1328 arch/powerpc/perf/imc-pmu.c 	pmu->pmu.add = imc_event_add;
pmu              1329 arch/powerpc/perf/imc-pmu.c 	pmu->pmu.del = imc_event_stop;
pmu              1330 arch/powerpc/perf/imc-pmu.c 	pmu->pmu.start = imc_event_start;
pmu              1331 arch/powerpc/perf/imc-pmu.c 	pmu->pmu.stop = imc_event_stop;
pmu              1332 arch/powerpc/perf/imc-pmu.c 	pmu->pmu.read = imc_event_update;
pmu              1333 arch/powerpc/perf/imc-pmu.c 	pmu->pmu.attr_groups = pmu->attr_groups;
pmu              1334 arch/powerpc/perf/imc-pmu.c 	pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
pmu              1335 arch/powerpc/perf/imc-pmu.c 	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;
pmu              1337 arch/powerpc/perf/imc-pmu.c 	switch (pmu->domain) {
pmu              1339 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.event_init = nest_imc_event_init;
pmu              1340 arch/powerpc/perf/imc-pmu.c 		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
pmu              1343 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.event_init = core_imc_event_init;
pmu              1344 arch/powerpc/perf/imc-pmu.c 		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
pmu              1347 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.event_init = thread_imc_event_init;
pmu              1348 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.add = thread_imc_event_add;
pmu              1349 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.del = thread_imc_event_del;
pmu              1350 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
pmu              1351 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
pmu              1352 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
pmu              1355 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.event_init = trace_imc_event_init;
pmu              1356 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.add = trace_imc_event_add;
pmu              1357 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.del = trace_imc_event_del;
pmu              1358 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.start = trace_imc_event_start;
pmu              1359 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.stop = trace_imc_event_stop;
pmu              1360 arch/powerpc/perf/imc-pmu.c 		pmu->pmu.read = trace_imc_event_read;
pmu              1361 arch/powerpc/perf/imc-pmu.c 		pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
pmu              1522 arch/powerpc/perf/imc-pmu.c 	perf_pmu_unregister(&thread_imc_pmu->pmu);
pmu              1540 arch/powerpc/perf/imc-pmu.c 		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
pmu              1541 arch/powerpc/perf/imc-pmu.c 		if (!pmu_ptr->pmu.name)
pmu              1556 arch/powerpc/perf/imc-pmu.c 		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
pmu              1557 arch/powerpc/perf/imc-pmu.c 		if (!pmu_ptr->pmu.name)
pmu              1579 arch/powerpc/perf/imc-pmu.c 		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
pmu              1580 arch/powerpc/perf/imc-pmu.c 		if (!pmu_ptr->pmu.name)
pmu              1596 arch/powerpc/perf/imc-pmu.c 		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
pmu              1597 arch/powerpc/perf/imc-pmu.c 		if (!pmu_ptr->pmu.name)
pmu              1708 arch/powerpc/perf/imc-pmu.c 	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
pmu              1713 arch/powerpc/perf/imc-pmu.c 							pmu_ptr->pmu.name);
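The imc registrations above build PMU names at runtime with kasprintf() and pass type -1 so the core assigns a fresh dynamic type id rather than a fixed PERF_TYPE_* slot. Condensed sketch, relying on struct imc_pmu embedding a struct pmu as the imc-pmu.h entry above shows:

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/imc-pmu.h>

static int my_imc_register(struct imc_pmu *pmu_ptr, const char *s)
{
	pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
	if (!pmu_ptr->pmu.name)
		return -ENOMEM;

	/* type == -1: perf allocates a new type number for this PMU */
	return perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
}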
pmu               179 arch/powerpc/platforms/powernv/opal-imc.c 		pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name);
pmu               180 arch/powerpc/platforms/powernv/opal-imc.c 		kfree(pmu_ptr->pmu.name);
pmu               248 arch/powerpc/platforms/powernv/opal-imc.c 	struct imc_pmu *pmu;
pmu               264 arch/powerpc/platforms/powernv/opal-imc.c 		pmu = NULL;
pmu               296 arch/powerpc/platforms/powernv/opal-imc.c 		pmu = imc_pmu_create(imc_dev, pmu_count, domain);
pmu               297 arch/powerpc/platforms/powernv/opal-imc.c 		if (pmu != NULL) {
pmu               300 arch/powerpc/platforms/powernv/opal-imc.c 					export_imc_mode_and_cmd(imc_dev, pmu);
pmu                60 arch/riscv/include/asm/perf_event.h 	struct pmu	*pmu;
pmu               259 arch/riscv/kernel/perf_event.c 		riscv_pmu->pmu->read(event);
pmu               320 arch/riscv/kernel/perf_event.c 		riscv_pmu->pmu->start(event, PERF_EF_RELOAD);
pmu               335 arch/riscv/kernel/perf_event.c 	riscv_pmu->pmu->stop(event, PERF_EF_UPDATE);
pmu               437 arch/riscv/kernel/perf_event.c static struct pmu min_pmu = {
pmu               448 arch/riscv/kernel/perf_event.c 	.pmu = &min_pmu,
pmu               482 arch/riscv/kernel/perf_event.c 	perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
pmu               121 arch/s390/kernel/perf_cpum_cf.c static void cpumf_pmu_enable(struct pmu *pmu)
pmu               144 arch/s390/kernel/perf_cpum_cf.c static void cpumf_pmu_disable(struct pmu *pmu)
pmu               302 arch/s390/kernel/perf_cpum_cf.c 	else if (event->pmu->type == type)
pmu               466 arch/s390/kernel/perf_cpum_cf.c static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
pmu               476 arch/s390/kernel/perf_cpum_cf.c 	perf_pmu_disable(pmu);
pmu               485 arch/s390/kernel/perf_cpum_cf.c static void cpumf_pmu_cancel_txn(struct pmu *pmu)
pmu               499 arch/s390/kernel/perf_cpum_cf.c 	perf_pmu_enable(pmu);
pmu               507 arch/s390/kernel/perf_cpum_cf.c static int cpumf_pmu_commit_txn(struct pmu *pmu)
pmu               526 arch/s390/kernel/perf_cpum_cf.c 	perf_pmu_enable(pmu);
pmu               531 arch/s390/kernel/perf_cpum_cf.c static struct pmu cpumf_pmu = {
pmu               130 arch/s390/kernel/perf_cpum_cf_diag.c static void cf_diag_enable(struct pmu *pmu)
pmu               137 arch/s390/kernel/perf_cpum_cf_diag.c 			    __func__, pmu, smp_processor_id(), cpuhw->flags,
pmu               156 arch/s390/kernel/perf_cpum_cf_diag.c static void cf_diag_disable(struct pmu *pmu)
pmu               164 arch/s390/kernel/perf_cpum_cf_diag.c 			    __func__, pmu, smp_processor_id(), cpuhw->flags,
pmu               248 arch/s390/kernel/perf_cpum_cf_diag.c 			    event, event->cpu, attr->config, event->pmu->type,
pmu               252 arch/s390/kernel/perf_cpum_cf_diag.c 	    event->attr.type != event->pmu->type)
pmu               515 arch/s390/kernel/perf_cpum_cf_diag.c 		event->pmu->stop(event, 0);
pmu               631 arch/s390/kernel/perf_cpum_cf_diag.c static struct pmu cf_diag = {
pmu               934 arch/s390/kernel/perf_cpum_sf.c static void cpumsf_pmu_enable(struct pmu *pmu)
pmu               995 arch/s390/kernel/perf_cpum_sf.c static void cpumsf_pmu_disable(struct pmu *pmu)
pmu              1134 arch/s390/kernel/perf_cpum_sf.c 		event->pmu->stop(event, 0);
pmu              1846 arch/s390/kernel/perf_cpum_sf.c 	perf_pmu_disable(event->pmu);
pmu              1851 arch/s390/kernel/perf_cpum_sf.c 	perf_pmu_enable(event->pmu);
pmu              1864 arch/s390/kernel/perf_cpum_sf.c 	perf_pmu_disable(event->pmu);
pmu              1873 arch/s390/kernel/perf_cpum_sf.c 	perf_pmu_enable(event->pmu);
pmu              1889 arch/s390/kernel/perf_cpum_sf.c 	perf_pmu_disable(event->pmu);
pmu              1934 arch/s390/kernel/perf_cpum_sf.c 	perf_pmu_enable(event->pmu);
pmu              1942 arch/s390/kernel/perf_cpum_sf.c 	perf_pmu_disable(event->pmu);
pmu              1953 arch/s390/kernel/perf_cpum_sf.c 	perf_pmu_enable(event->pmu);
pmu              2008 arch/s390/kernel/perf_cpum_sf.c static struct pmu cpumf_sampling = {
pmu                46 arch/sh/include/asm/hw_breakpoint.h struct pmu;
pmu                71 arch/sh/include/asm/hw_breakpoint.h extern struct pmu perf_ops_bp;
pmu               269 arch/sh/kernel/perf_event.c 	perf_pmu_disable(event->pmu);
pmu               289 arch/sh/kernel/perf_event.c 	perf_pmu_enable(event->pmu);
pmu               325 arch/sh/kernel/perf_event.c static void sh_pmu_enable(struct pmu *pmu)
pmu               333 arch/sh/kernel/perf_event.c static void sh_pmu_disable(struct pmu *pmu)
pmu               341 arch/sh/kernel/perf_event.c static struct pmu pmu = {
pmu               373 arch/sh/kernel/perf_event.c 	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu               377 arch/sh/kernel/perf_event.c 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
pmu                58 arch/sparc/include/asm/ptrace.h 	struct global_pmu_snapshot	pmu;
pmu              1029 arch/sparc/kernel/perf_event.c static void sparc_pmu_enable(struct pmu *pmu)
pmu              1047 arch/sparc/kernel/perf_event.c static void sparc_pmu_disable(struct pmu *pmu)
pmu              1512 arch/sparc/kernel/perf_event.c static void sparc_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
pmu              1522 arch/sparc/kernel/perf_event.c 	perf_pmu_disable(pmu);
pmu              1530 arch/sparc/kernel/perf_event.c static void sparc_pmu_cancel_txn(struct pmu *pmu)
pmu              1542 arch/sparc/kernel/perf_event.c 	perf_pmu_enable(pmu);
pmu              1550 arch/sparc/kernel/perf_event.c static int sparc_pmu_commit_txn(struct pmu *pmu)
pmu              1572 arch/sparc/kernel/perf_event.c 	perf_pmu_enable(pmu);
pmu              1576 arch/sparc/kernel/perf_event.c static struct pmu pmu = {
pmu              1730 arch/sparc/kernel/perf_event.c 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
pmu               330 arch/sparc/kernel/process_64.c 	pp = &global_cpu_snapshot[this_cpu].pmu;
pmu               369 arch/sparc/kernel/process_64.c 		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
pmu                83 arch/x86/events/amd/ibs.c 	struct pmu			pmu;
pmu               195 arch/x86/events/amd/ibs.c 	if (perf_ibs_fetch.pmu.type == type)
pmu               197 arch/x86/events/amd/ibs.c 	if (perf_ibs_op.pmu.type == type)
pmu               273 arch/x86/events/amd/ibs.c 	if (event->pmu != &perf_ibs->pmu)
pmu               395 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
pmu               420 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
pmu               473 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
pmu               491 arch/x86/events/amd/ibs.c 	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
pmu               520 arch/x86/events/amd/ibs.c 	.pmu = {
pmu               545 arch/x86/events/amd/ibs.c 	.pmu = {
pmu               720 arch/x86/events/amd/ibs.c 		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
pmu               723 arch/x86/events/amd/ibs.c 	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
pmu                38 arch/x86/events/amd/iommu.c 	struct pmu pmu;
pmu               156 arch/x86/events/amd/iommu.c 	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
pmu               212 arch/x86/events/amd/iommu.c 	if (event->attr.type != event->pmu->type)
pmu               235 arch/x86/events/amd/iommu.c 	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
pmu               363 arch/x86/events/amd/iommu.c 			container_of(event->pmu, struct perf_amd_iommu, pmu);
pmu               400 arch/x86/events/amd/iommu.c static const struct pmu iommu_pmu __initconst = {
pmu               423 arch/x86/events/amd/iommu.c 	perf_iommu->pmu          = iommu_pmu;
pmu               437 arch/x86/events/amd/iommu.c 	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
pmu                37 arch/x86/events/amd/power.c static struct pmu pmu_class;
pmu               209 arch/x86/events/amd/power.c static struct pmu pmu_class = {
pmu                48 arch/x86/events/amd/uncore.c 	struct pmu *pmu;
pmu                56 arch/x86/events/amd/uncore.c static struct pmu amd_nb_pmu;
pmu                57 arch/x86/events/amd/uncore.c static struct pmu amd_llc_pmu;
pmu                64 arch/x86/events/amd/uncore.c 	return event->pmu->type == amd_nb_pmu.type;
pmu                69 arch/x86/events/amd/uncore.c 	return event->pmu->type == amd_llc_pmu.type;
pmu               188 arch/x86/events/amd/uncore.c 	if (event->attr.type != event->pmu->type)
pmu               237 arch/x86/events/amd/uncore.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu               239 arch/x86/events/amd/uncore.c 	if (pmu->type == amd_nb_pmu.type)
pmu               241 arch/x86/events/amd/uncore.c 	else if (pmu->type == amd_llc_pmu.type)
pmu               298 arch/x86/events/amd/uncore.c static struct pmu amd_nb_pmu = {
pmu               309 arch/x86/events/amd/uncore.c static struct pmu amd_llc_pmu = {
pmu               339 arch/x86/events/amd/uncore.c 		uncore_nb->pmu = &amd_nb_pmu;
pmu               353 arch/x86/events/amd/uncore.c 		uncore_llc->pmu = &amd_llc_pmu;
pmu               469 arch/x86/events/amd/uncore.c 			perf_pmu_migrate_context(this->pmu, cpu, i);
pmu               576 arch/x86/events/core.c 		if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS))
pmu               645 arch/x86/events/core.c static void x86_pmu_disable(struct pmu *pmu)
pmu               677 arch/x86/events/core.c static struct pmu pmu;
pmu               681 arch/x86/events/core.c 	return event->pmu == &pmu;
pmu               684 arch/x86/events/core.c struct pmu *x86_get_pmu(void)
pmu               686 arch/x86/events/core.c 	return &pmu;
pmu              1113 arch/x86/events/core.c static void x86_pmu_enable(struct pmu *pmu)
pmu              1638 arch/x86/events/core.c 	pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu              1833 arch/x86/events/core.c 	pmu.attr_update = x86_pmu.attr_update;
pmu              1863 arch/x86/events/core.c 	err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
pmu              1895 arch/x86/events/core.c static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
pmu              1905 arch/x86/events/core.c 	perf_pmu_disable(pmu);
pmu              1914 arch/x86/events/core.c static void x86_pmu_cancel_txn(struct pmu *pmu)
pmu              1932 arch/x86/events/core.c 	perf_pmu_enable(pmu);
pmu              1942 arch/x86/events/core.c static int x86_pmu_commit_txn(struct pmu *pmu)
pmu              1971 arch/x86/events/core.c 	perf_pmu_enable(pmu);
pmu              2078 arch/x86/events/core.c 	struct pmu *tmp;
pmu              2098 arch/x86/events/core.c 		tmp = event->pmu;
pmu              2099 arch/x86/events/core.c 		event->pmu = &pmu;
pmu              2106 arch/x86/events/core.c 		event->pmu = tmp;
pmu              2278 arch/x86/events/core.c 	if (!(pmu.capabilities & PERF_PMU_CAP_AUX_OUTPUT))
pmu              2287 arch/x86/events/core.c static struct pmu pmu = {
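The tmp/event->pmu swap above is the x86 group-validation trick: while scheduling the group on a throwaway fake cpuc, event->pmu is temporarily pointed at the core hardware pmu (so software group members are treated as in real scheduling), then restored whatever the outcome. The shape of the idiom, with the validation details elided:

static struct pmu pmu;				/* the file-scope core pmu */
static int my_schedule_events(struct perf_event *event);	/* hypothetical */

static int my_validate_group(struct perf_event *event)
{
	struct pmu *tmp;
	int ret;

	/* ... collect the whole group into a fake cpuc ... */

	tmp = event->pmu;
	event->pmu = &pmu;

	ret = my_schedule_events(event);

	event->pmu = tmp;		/* always restore the real pmu */
	return ret;
}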
pmu                64 arch/x86/events/intel/bts.c static struct pmu bts_pmu;
pmu               142 arch/x86/events/intel/cstate.c static struct pmu cstate_core_pmu;
pmu               223 arch/x86/events/intel/cstate.c static struct pmu cstate_pkg_pmu;
pmu               294 arch/x86/events/intel/cstate.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu               296 arch/x86/events/intel/cstate.c 	if (pmu == &cstate_core_pmu)
pmu               298 arch/x86/events/intel/cstate.c 	else if (pmu == &cstate_pkg_pmu)
pmu               309 arch/x86/events/intel/cstate.c 	if (event->attr.type != event->pmu->type)
pmu               319 arch/x86/events/intel/cstate.c 	if (event->pmu == &cstate_core_pmu) {
pmu               328 arch/x86/events/intel/cstate.c 	} else if (event->pmu == &cstate_pkg_pmu) {
pmu               474 arch/x86/events/intel/cstate.c static struct pmu cstate_core_pmu = {
pmu               489 arch/x86/events/intel/cstate.c static struct pmu cstate_pkg_pmu = {
pmu              1015 arch/x86/events/intel/ds.c 	struct pmu *pmu = event->ctx->pmu;
pmu              1025 arch/x86/events/intel/ds.c 			perf_sched_cb_inc(pmu);
pmu              1027 arch/x86/events/intel/ds.c 			perf_sched_cb_dec(pmu);
pmu              1657 arch/x86/events/intel/ds.c 	perf_pmu_disable(event->pmu);
pmu              1659 arch/x86/events/intel/ds.c 	perf_pmu_enable(event->pmu);
pmu               493 arch/x86/events/intel/lbr.c 	perf_sched_cb_inc(event->ctx->pmu);
pmu               517 arch/x86/events/intel/lbr.c 	perf_sched_cb_dec(event->ctx->pmu);
pmu              1540 arch/x86/events/intel/pt.c 	if (event->attr.type != pt_pmu.pmu.type)
pmu              1569 arch/x86/events/intel/pt.c 	return event->pmu == &pt_pmu.pmu;
pmu              1608 arch/x86/events/intel/pt.c 		pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
pmu              1610 arch/x86/events/intel/pt.c 	pt_pmu.pmu.capabilities	|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
pmu              1611 arch/x86/events/intel/pt.c 	pt_pmu.pmu.attr_groups		 = pt_attr_groups;
pmu              1612 arch/x86/events/intel/pt.c 	pt_pmu.pmu.task_ctx_nr		 = perf_sw_context;
pmu              1613 arch/x86/events/intel/pt.c 	pt_pmu.pmu.event_init		 = pt_event_init;
pmu              1614 arch/x86/events/intel/pt.c 	pt_pmu.pmu.add			 = pt_event_add;
pmu              1615 arch/x86/events/intel/pt.c 	pt_pmu.pmu.del			 = pt_event_del;
pmu              1616 arch/x86/events/intel/pt.c 	pt_pmu.pmu.start		 = pt_event_start;
pmu              1617 arch/x86/events/intel/pt.c 	pt_pmu.pmu.stop			 = pt_event_stop;
pmu              1618 arch/x86/events/intel/pt.c 	pt_pmu.pmu.read			 = pt_event_read;
pmu              1619 arch/x86/events/intel/pt.c 	pt_pmu.pmu.setup_aux		 = pt_buffer_setup_aux;
pmu              1620 arch/x86/events/intel/pt.c 	pt_pmu.pmu.free_aux		 = pt_buffer_free_aux;
pmu              1621 arch/x86/events/intel/pt.c 	pt_pmu.pmu.addr_filters_sync     = pt_event_addr_filters_sync;
pmu              1622 arch/x86/events/intel/pt.c 	pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
pmu              1623 arch/x86/events/intel/pt.c 	pt_pmu.pmu.nr_addr_filters       =
pmu              1626 arch/x86/events/intel/pt.c 	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
pmu                44 arch/x86/events/intel/pt.h 	struct pmu		pmu;
pmu               119 arch/x86/events/intel/rapl.c 	struct pmu		*pmu;
pmu               125 arch/x86/events/intel/rapl.c 	struct pmu		pmu;
pmu               211 arch/x86/events/intel/rapl.c static void rapl_start_hrtimer(struct rapl_pmu *pmu)
pmu               213 arch/x86/events/intel/rapl.c        hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
pmu               219 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
pmu               223 arch/x86/events/intel/rapl.c 	if (!pmu->n_active)
pmu               226 arch/x86/events/intel/rapl.c 	raw_spin_lock_irqsave(&pmu->lock, flags);
pmu               228 arch/x86/events/intel/rapl.c 	list_for_each_entry(event, &pmu->active_list, active_entry)
pmu               231 arch/x86/events/intel/rapl.c 	raw_spin_unlock_irqrestore(&pmu->lock, flags);
pmu               233 arch/x86/events/intel/rapl.c 	hrtimer_forward_now(hrtimer, pmu->timer_interval);
pmu               238 arch/x86/events/intel/rapl.c static void rapl_hrtimer_init(struct rapl_pmu *pmu)
pmu               240 arch/x86/events/intel/rapl.c 	struct hrtimer *hr = &pmu->hrtimer;
pmu               246 arch/x86/events/intel/rapl.c static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
pmu               254 arch/x86/events/intel/rapl.c 	list_add_tail(&event->active_entry, &pmu->active_list);
pmu               258 arch/x86/events/intel/rapl.c 	pmu->n_active++;
pmu               259 arch/x86/events/intel/rapl.c 	if (pmu->n_active == 1)
pmu               260 arch/x86/events/intel/rapl.c 		rapl_start_hrtimer(pmu);
pmu               265 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu = event->pmu_private;
pmu               268 arch/x86/events/intel/rapl.c 	raw_spin_lock_irqsave(&pmu->lock, flags);
pmu               269 arch/x86/events/intel/rapl.c 	__rapl_pmu_event_start(pmu, event);
pmu               270 arch/x86/events/intel/rapl.c 	raw_spin_unlock_irqrestore(&pmu->lock, flags);
pmu               275 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu = event->pmu_private;
pmu               279 arch/x86/events/intel/rapl.c 	raw_spin_lock_irqsave(&pmu->lock, flags);
pmu               283 arch/x86/events/intel/rapl.c 		WARN_ON_ONCE(pmu->n_active <= 0);
pmu               284 arch/x86/events/intel/rapl.c 		pmu->n_active--;
pmu               285 arch/x86/events/intel/rapl.c 		if (pmu->n_active == 0)
pmu               286 arch/x86/events/intel/rapl.c 			hrtimer_cancel(&pmu->hrtimer);
pmu               304 arch/x86/events/intel/rapl.c 	raw_spin_unlock_irqrestore(&pmu->lock, flags);
pmu               309 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu = event->pmu_private;
pmu               313 arch/x86/events/intel/rapl.c 	raw_spin_lock_irqsave(&pmu->lock, flags);
pmu               318 arch/x86/events/intel/rapl.c 		__rapl_pmu_event_start(pmu, event);
pmu               320 arch/x86/events/intel/rapl.c 	raw_spin_unlock_irqrestore(&pmu->lock, flags);
pmu               334 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu;
pmu               337 arch/x86/events/intel/rapl.c 	if (event->attr.type != rapl_pmus->pmu.type)
pmu               364 arch/x86/events/intel/rapl.c 	pmu = cpu_to_rapl_pmu(event->cpu);
pmu               365 arch/x86/events/intel/rapl.c 	if (!pmu)
pmu               367 arch/x86/events/intel/rapl.c 	event->cpu = pmu->cpu;
pmu               368 arch/x86/events/intel/rapl.c 	event->pmu_private = pmu;
pmu               526 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
pmu               533 arch/x86/events/intel/rapl.c 	pmu->cpu = -1;
pmu               540 arch/x86/events/intel/rapl.c 		pmu->cpu = target;
pmu               541 arch/x86/events/intel/rapl.c 		perf_pmu_migrate_context(pmu->pmu, cpu, target);
pmu               548 arch/x86/events/intel/rapl.c 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
pmu               551 arch/x86/events/intel/rapl.c 	if (!pmu) {
pmu               552 arch/x86/events/intel/rapl.c 		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
pmu               553 arch/x86/events/intel/rapl.c 		if (!pmu)
pmu               556 arch/x86/events/intel/rapl.c 		raw_spin_lock_init(&pmu->lock);
pmu               557 arch/x86/events/intel/rapl.c 		INIT_LIST_HEAD(&pmu->active_list);
pmu               558 arch/x86/events/intel/rapl.c 		pmu->pmu = &rapl_pmus->pmu;
pmu               559 arch/x86/events/intel/rapl.c 		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
pmu               560 arch/x86/events/intel/rapl.c 		rapl_hrtimer_init(pmu);
pmu               562 arch/x86/events/intel/rapl.c 		rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
pmu               574 arch/x86/events/intel/rapl.c 	pmu->cpu = cpu;
pmu               657 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.attr_groups	= rapl_attr_groups;
pmu               658 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.attr_update	= rapl_attr_update;
pmu               659 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.task_ctx_nr	= perf_invalid_context;
pmu               660 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.event_init	= rapl_pmu_event_init;
pmu               661 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.add		= rapl_pmu_event_add;
pmu               662 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.del		= rapl_pmu_event_del;
pmu               663 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.start		= rapl_pmu_event_start;
pmu               664 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
pmu               665 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.read		= rapl_pmu_event_read;
pmu               666 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.module		= THIS_MODULE;
pmu               667 arch/x86/events/intel/rapl.c 	rapl_pmus->pmu.capabilities	= PERF_PMU_CAP_NO_EXCLUDE;
pmu               780 arch/x86/events/intel/rapl.c 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
pmu               799 arch/x86/events/intel/rapl.c 	perf_pmu_unregister(&rapl_pmus->pmu);
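RAPL energy counters raise no overflow interrupt, so the driver above polls: while n_active is non-zero an hrtimer periodically walks active_list and folds hardware deltas into each event count. The handler, condensed (my_event_update stands in for the real update; rapl_pmu fields as listed above):

#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/perf_event.h>

static void my_event_update(struct perf_event *event);	/* hypothetical */

static enum hrtimer_restart my_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	if (!pmu->n_active)
		return HRTIMER_NORESTART;	/* nothing left to poll */

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		my_event_update(event);		/* fold hw delta into count */

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);
	return HRTIMER_RESTART;
}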
pmu               103 arch/x86/events/intel/uncore.c struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
pmu               111 arch/x86/events/intel/uncore.c 	return dieid < max_dies ? pmu->boxes[dieid] : NULL;
pmu               349 arch/x86/events/intel/uncore.c 	return &box->pmu->pmu == event->pmu;
pmu               359 arch/x86/events/intel/uncore.c 	max_count = box->pmu->type->num_counters;
pmu               360 arch/x86/events/intel/uncore.c 	if (box->pmu->type->fixed_ctl)
pmu               393 arch/x86/events/intel/uncore.c 	struct intel_uncore_type *type = box->pmu->type;
pmu               418 arch/x86/events/intel/uncore.c 	if (box->pmu->type->ops->put_constraint)
pmu               419 arch/x86/events/intel/uncore.c 		box->pmu->type->ops->put_constraint(box, event);
pmu               657 arch/x86/events/intel/uncore.c static int uncore_validate_group(struct intel_uncore_pmu *pmu,
pmu               668 arch/x86/events/intel/uncore.c 	fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
pmu               672 arch/x86/events/intel/uncore.c 	fake_box->pmu = pmu;
pmu               698 arch/x86/events/intel/uncore.c 	struct intel_uncore_pmu *pmu;
pmu               703 arch/x86/events/intel/uncore.c 	if (event->attr.type != event->pmu->type)
pmu               706 arch/x86/events/intel/uncore.c 	pmu = uncore_event_to_pmu(event);
pmu               708 arch/x86/events/intel/uncore.c 	if (pmu->func_id < 0)
pmu               721 arch/x86/events/intel/uncore.c 	box = uncore_pmu_to_box(pmu, event->cpu);
pmu               736 arch/x86/events/intel/uncore.c 		if (!pmu->type->fixed_ctl)
pmu               742 arch/x86/events/intel/uncore.c 		if (pmu->type->single_fixed && pmu->pmu_idx > 0)
pmu               761 arch/x86/events/intel/uncore.c 			      (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32));
pmu               762 arch/x86/events/intel/uncore.c 		if (pmu->type->ops->hw_config) {
pmu               763 arch/x86/events/intel/uncore.c 			ret = pmu->type->ops->hw_config(box, event);
pmu               770 arch/x86/events/intel/uncore.c 		ret = uncore_validate_group(pmu, event);
pmu               777 arch/x86/events/intel/uncore.c static void uncore_pmu_enable(struct pmu *pmu)
pmu               782 arch/x86/events/intel/uncore.c 	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
pmu               794 arch/x86/events/intel/uncore.c static void uncore_pmu_disable(struct pmu *pmu)
pmu               799 arch/x86/events/intel/uncore.c 	uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
pmu               828 arch/x86/events/intel/uncore.c static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
pmu               832 arch/x86/events/intel/uncore.c 	if (!pmu->type->pmu) {
pmu               833 arch/x86/events/intel/uncore.c 		pmu->pmu = (struct pmu) {
pmu               834 arch/x86/events/intel/uncore.c 			.attr_groups	= pmu->type->attr_groups,
pmu               848 arch/x86/events/intel/uncore.c 		pmu->pmu = *pmu->type->pmu;
pmu               849 arch/x86/events/intel/uncore.c 		pmu->pmu.attr_groups = pmu->type->attr_groups;
pmu               852 arch/x86/events/intel/uncore.c 	if (pmu->type->num_boxes == 1) {
pmu               853 arch/x86/events/intel/uncore.c 		if (strlen(pmu->type->name) > 0)
pmu               854 arch/x86/events/intel/uncore.c 			sprintf(pmu->name, "uncore_%s", pmu->type->name);
pmu               856 arch/x86/events/intel/uncore.c 			sprintf(pmu->name, "uncore");
pmu               858 arch/x86/events/intel/uncore.c 		sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
pmu               859 arch/x86/events/intel/uncore.c 			pmu->pmu_idx);
pmu               862 arch/x86/events/intel/uncore.c 	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
pmu               864 arch/x86/events/intel/uncore.c 		pmu->registered = true;
pmu               868 arch/x86/events/intel/uncore.c static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
pmu               870 arch/x86/events/intel/uncore.c 	if (!pmu->registered)
pmu               872 arch/x86/events/intel/uncore.c 	perf_pmu_unregister(&pmu->pmu);
pmu               873 arch/x86/events/intel/uncore.c 	pmu->registered = false;
pmu               876 arch/x86/events/intel/uncore.c static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
pmu               881 arch/x86/events/intel/uncore.c 		kfree(pmu->boxes[die]);
pmu               882 arch/x86/events/intel/uncore.c 	kfree(pmu->boxes);
pmu               887 arch/x86/events/intel/uncore.c 	struct intel_uncore_pmu *pmu = type->pmus;
pmu               890 arch/x86/events/intel/uncore.c 	if (pmu) {
pmu               891 arch/x86/events/intel/uncore.c 		for (i = 0; i < type->num_boxes; i++, pmu++) {
pmu               892 arch/x86/events/intel/uncore.c 			uncore_pmu_unregister(pmu);
pmu               893 arch/x86/events/intel/uncore.c 			uncore_free_boxes(pmu);
pmu               986 arch/x86/events/intel/uncore.c 	struct intel_uncore_pmu *pmu = NULL;
pmu              1025 arch/x86/events/intel/uncore.c 					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
pmu              1031 arch/x86/events/intel/uncore.c 		if (pmu == NULL)
pmu              1038 arch/x86/events/intel/uncore.c 		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
pmu              1041 arch/x86/events/intel/uncore.c 	if (WARN_ON_ONCE(pmu->boxes[die] != NULL))
pmu              1048 arch/x86/events/intel/uncore.c 	if (pmu->func_id < 0)
pmu              1049 arch/x86/events/intel/uncore.c 		pmu->func_id = pdev->devfn;
pmu              1051 arch/x86/events/intel/uncore.c 		WARN_ON_ONCE(pmu->func_id != pdev->devfn);
pmu              1057 arch/x86/events/intel/uncore.c 	box->pmu = pmu;
pmu              1061 arch/x86/events/intel/uncore.c 	pmu->boxes[die] = box;
pmu              1062 arch/x86/events/intel/uncore.c 	if (atomic_inc_return(&pmu->activeboxes) > 1)
pmu              1066 arch/x86/events/intel/uncore.c 	ret = uncore_pmu_register(pmu);
pmu              1069 arch/x86/events/intel/uncore.c 		pmu->boxes[die] = NULL;
pmu              1079 arch/x86/events/intel/uncore.c 	struct intel_uncore_pmu *pmu;
pmu              1098 arch/x86/events/intel/uncore.c 	pmu = box->pmu;
pmu              1103 arch/x86/events/intel/uncore.c 	pmu->boxes[box->dieid] = NULL;
pmu              1104 arch/x86/events/intel/uncore.c 	if (atomic_dec_return(&pmu->activeboxes) == 0)
pmu              1105 arch/x86/events/intel/uncore.c 		uncore_pmu_unregister(pmu);
pmu              1160 arch/x86/events/intel/uncore.c 	struct intel_uncore_pmu *pmu = type->pmus;
pmu              1165 arch/x86/events/intel/uncore.c 	for (i = 0; i < type->num_boxes; i++, pmu++) {
pmu              1166 arch/x86/events/intel/uncore.c 		box = pmu->boxes[die];
pmu              1182 arch/x86/events/intel/uncore.c 		perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);
pmu              1197 arch/x86/events/intel/uncore.c 	struct intel_uncore_pmu *pmu;
pmu              1203 arch/x86/events/intel/uncore.c 		pmu = type->pmus;
pmu              1204 arch/x86/events/intel/uncore.c 		for (i = 0; i < type->num_boxes; i++, pmu++) {
pmu              1205 arch/x86/events/intel/uncore.c 			box = pmu->boxes[id];
pmu              1245 arch/x86/events/intel/uncore.c 	struct intel_uncore_pmu *pmu;
pmu              1252 arch/x86/events/intel/uncore.c 		pmu = type->pmus;
pmu              1253 arch/x86/events/intel/uncore.c 		for (i = 0; i < type->num_boxes; i++, pmu++) {
pmu              1254 arch/x86/events/intel/uncore.c 			if (pmu->boxes[die])
pmu              1259 arch/x86/events/intel/uncore.c 			box->pmu = pmu;
pmu              1267 arch/x86/events/intel/uncore.c 		box->pmu->boxes[die] = box;
pmu              1283 arch/x86/events/intel/uncore.c 	struct intel_uncore_pmu *pmu;
pmu              1293 arch/x86/events/intel/uncore.c 		pmu = type->pmus;
pmu              1294 arch/x86/events/intel/uncore.c 		for (i = 0; i < type->num_boxes; i++, pmu++) {
pmu              1295 arch/x86/events/intel/uncore.c 			box = pmu->boxes[id];
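Note: taken together, the uncore.c hits above trace a lazy-registration idiom: each PCI probe installs its box into pmu->boxes[die], but only the arrival of the first box for a type actually calls uncore_pmu_register(), and removal of the last box triggers uncore_pmu_unregister(). A minimal sketch of that gate follows; the toy_* names are hypothetical stand-ins, and the real probe path additionally clears boxes[die] again when registration fails.

    #include <linux/atomic.h>
    #include <linux/perf_event.h>

    struct toy_uncore_pmu {
        struct pmu pmu;         /* callbacks assumed filled in elsewhere */
        atomic_t activeboxes;   /* starts at zero */
        bool registered;
    };

    static int toy_install_box(struct toy_uncore_pmu *pmu)
    {
        /* Boxes after the first share the already-registered PMU. */
        if (atomic_inc_return(&pmu->activeboxes) > 1)
            return 0;

        if (!pmu->registered) {
            int ret = perf_pmu_register(&pmu->pmu, "toy_uncore", -1);

            if (ret)
                return ret;
            pmu->registered = true;
        }
        return 0;
    }

    static void toy_remove_box(struct toy_uncore_pmu *pmu)
    {
        /* Only the last box to leave tears the PMU down again. */
        if (atomic_dec_return(&pmu->activeboxes) == 0 && pmu->registered) {
            perf_pmu_unregister(&pmu->pmu);
            pmu->registered = false;
        }
    }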
pmu                75 arch/x86/events/intel/uncore.h 	struct pmu *pmu; /* for custom pmu ops */
pmu                97 arch/x86/events/intel/uncore.h 	struct pmu			pmu;
pmu               127 arch/x86/events/intel/uncore.h 	struct intel_uncore_pmu *pmu;
pmu               201 arch/x86/events/intel/uncore.h 	return box->pmu->type->box_ctl +
pmu               202 arch/x86/events/intel/uncore.h 	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
pmu               207 arch/x86/events/intel/uncore.h 	return box->pmu->type->box_ctl;
pmu               212 arch/x86/events/intel/uncore.h 	return box->pmu->type->fixed_ctl;
pmu               217 arch/x86/events/intel/uncore.h 	return box->pmu->type->fixed_ctr;
pmu               224 arch/x86/events/intel/uncore.h 		return idx * 8 + box->pmu->type->event_ctl;
pmu               226 arch/x86/events/intel/uncore.h 	return idx * 4 + box->pmu->type->event_ctl;
pmu               232 arch/x86/events/intel/uncore.h 	return idx * 8 + box->pmu->type->perf_ctr;
pmu               237 arch/x86/events/intel/uncore.h 	struct intel_uncore_pmu *pmu = box->pmu;
pmu               238 arch/x86/events/intel/uncore.h 	return pmu->type->msr_offsets ?
pmu               239 arch/x86/events/intel/uncore.h 		pmu->type->msr_offsets[pmu->pmu_idx] :
pmu               240 arch/x86/events/intel/uncore.h 		pmu->type->msr_offset * pmu->pmu_idx;
pmu               245 arch/x86/events/intel/uncore.h 	if (!box->pmu->type->box_ctl)
pmu               247 arch/x86/events/intel/uncore.h 	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
pmu               252 arch/x86/events/intel/uncore.h 	if (!box->pmu->type->fixed_ctl)
pmu               254 arch/x86/events/intel/uncore.h 	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
pmu               259 arch/x86/events/intel/uncore.h 	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
pmu               309 arch/x86/events/intel/uncore.h 	struct intel_uncore_pmu *pmu = box->pmu;
pmu               311 arch/x86/events/intel/uncore.h 	return pmu->type->freerunning[type].counter_base +
pmu               312 arch/x86/events/intel/uncore.h 	       pmu->type->freerunning[type].counter_offset * idx +
pmu               313 arch/x86/events/intel/uncore.h 	       pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
pmu               321 arch/x86/events/intel/uncore.h 		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
pmu               323 arch/x86/events/intel/uncore.h 		return box->pmu->type->event_ctl +
pmu               324 arch/x86/events/intel/uncore.h 		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
pmu               334 arch/x86/events/intel/uncore.h 		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
pmu               336 arch/x86/events/intel/uncore.h 		return box->pmu->type->perf_ctr +
pmu               337 arch/x86/events/intel/uncore.h 		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
pmu               380 arch/x86/events/intel/uncore.h 	return box->pmu->type->perf_ctr_bits;
pmu               385 arch/x86/events/intel/uncore.h 	return box->pmu->type->fixed_ctr_bits;
pmu               394 arch/x86/events/intel/uncore.h 	return box->pmu->type->freerunning[type].bits;
pmu               402 arch/x86/events/intel/uncore.h 	return box->pmu->type->freerunning[type].num_counters;
pmu               408 arch/x86/events/intel/uncore.h 	return box->pmu->type->num_freerunning_types;
pmu               423 arch/x86/events/intel/uncore.h 	return box->pmu->type->num_counters;
pmu               447 arch/x86/events/intel/uncore.h 	box->pmu->type->ops->disable_event(box, event);
pmu               453 arch/x86/events/intel/uncore.h 	box->pmu->type->ops->enable_event(box, event);
pmu               459 arch/x86/events/intel/uncore.h 	return box->pmu->type->ops->read_counter(box, event);
pmu               465 arch/x86/events/intel/uncore.h 		if (box->pmu->type->ops->init_box)
pmu               466 arch/x86/events/intel/uncore.h 			box->pmu->type->ops->init_box(box);
pmu               473 arch/x86/events/intel/uncore.h 		if (box->pmu->type->ops->exit_box)
pmu               474 arch/x86/events/intel/uncore.h 			box->pmu->type->ops->exit_box(box);
pmu               485 arch/x86/events/intel/uncore.h 	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
pmu               493 arch/x86/events/intel/uncore.h struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
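Note: the uncore.h accessor hits above all reduce to one address computation: a per-type MSR base plus a per-instance box offset, where the offset is a table lookup (msr_offsets) when strides are irregular and msr_offset * pmu_idx otherwise. A condensed sketch of that math, with reduced toy_* stand-in types:

    /* Only the fields the address math needs are kept. */
    struct toy_uncore_type {
        unsigned int event_ctl;          /* first event-select MSR */
        unsigned int perf_ctr;           /* first counter MSR */
        unsigned int msr_offset;         /* linear stride between boxes */
        const unsigned int *msr_offsets; /* irregular strides, or NULL */
    };

    struct toy_uncore_pmu {
        struct toy_uncore_type *type;
        int pmu_idx;                     /* which instance of this type */
    };

    static unsigned int toy_msr_box_offset(struct toy_uncore_pmu *pmu)
    {
        /* An explicit table wins; otherwise a linear stride. */
        return pmu->type->msr_offsets ?
            pmu->type->msr_offsets[pmu->pmu_idx] :
            pmu->type->msr_offset * pmu->pmu_idx;
    }

    static unsigned int toy_event_ctl(struct toy_uncore_pmu *pmu, int idx)
    {
        return pmu->type->event_ctl + idx + toy_msr_box_offset(pmu);
    }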
pmu               251 arch/x86/events/intel/uncore_nhmex.c 	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
pmu               367 arch/x86/events/intel/uncore_nhmex.c 	if (box->pmu->pmu_idx == 0)
pmu               452 arch/x86/events/intel/uncore_nhmex.c 	if (box->pmu->pmu_idx == 0)
pmu               767 arch/x86/events/intel/uncore_nhmex.c 	struct intel_uncore_type *type = box->pmu->type;
pmu               784 arch/x86/events/intel/uncore_nhmex.c 		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
pmu               811 arch/x86/events/intel/uncore_nhmex.c 		if (box->pmu->pmu_idx == 0)
pmu              1119 arch/x86/events/intel/uncore_nhmex.c 	port = idx / 6 + box->pmu->pmu_idx * 4;
pmu               138 arch/x86/events/intel/uncore_snb.c 	if (box->pmu->pmu_idx == 0) {
pmu               152 arch/x86/events/intel/uncore_snb.c 	if (box->pmu->pmu_idx == 0)
pmu               237 arch/x86/events/intel/uncore_snb.c 	if (box->pmu->pmu_idx == 0) {
pmu               243 arch/x86/events/intel/uncore_snb.c 	if (box->pmu->pmu_idx == 7)
pmu               255 arch/x86/events/intel/uncore_snb.c 	if (box->pmu->pmu_idx == 0)
pmu               450 arch/x86/events/intel/uncore_snb.c 	struct intel_uncore_pmu *pmu;
pmu               456 arch/x86/events/intel/uncore_snb.c 	if (event->attr.type != event->pmu->type)
pmu               459 arch/x86/events/intel/uncore_snb.c 	pmu = uncore_event_to_pmu(event);
pmu               461 arch/x86/events/intel/uncore_snb.c 	if (pmu->func_id < 0)
pmu               483 arch/x86/events/intel/uncore_snb.c 	box = uncore_pmu_to_box(pmu, event->cpu);
pmu               557 arch/x86/events/intel/uncore_snb.c static struct pmu snb_uncore_imc_pmu = {
pmu               588 arch/x86/events/intel/uncore_snb.c 	.pmu		= &snb_uncore_imc_pmu,
pmu               940 arch/x86/events/intel/uncore_snbep.c 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
pmu              1123 arch/x86/events/intel/uncore_snbep.c 		int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
pmu              1624 arch/x86/events/intel/uncore_snbep.c 			SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
pmu              2052 arch/x86/events/intel/uncore_snbep.c 			    KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
pmu              2607 arch/x86/events/intel/uncore_snbep.c 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
pmu              3500 arch/x86/events/intel/uncore_snbep.c 			    HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
pmu              4072 arch/x86/events/intel/uncore_snbep.c 		    box->pmu->type->msr_offset * box->pmu->pmu_idx;
pmu               193 arch/x86/events/msr.c 	if (event->attr.type != event->pmu->type)
pmu               278 arch/x86/events/msr.c static struct pmu pmu_msr = {
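Note: the first msr.c hit above is the standard guard for a PMU registered with a dynamic type id (the -1 third argument to perf_pmu_register() seen elsewhere in this listing): event_init must compare event->attr.type against event->pmu->type, because no PERF_TYPE_* constant exists for it. A minimal sketch, with a toy_ name as a stand-in:

    #include <linux/errno.h>
    #include <linux/perf_event.h>

    static int toy_msr_event_init(struct perf_event *event)
    {
        /*
         * With a dynamically allocated type id there is no
         * compile-time constant to compare against, so reject
         * anything addressed to a different PMU this way.
         */
        if (event->attr.type != event->pmu->type)
            return -ENOENT;

        /* ... validate event->attr.config here ... */
        return 0;
    }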
pmu               766 arch/x86/events/perf_event.h struct pmu *x86_get_pmu(void);
pmu                54 arch/x86/include/asm/hw_breakpoint.h struct pmu;
pmu                78 arch/x86/include/asm/hw_breakpoint.h extern struct pmu perf_ops_bp;
pmu               722 arch/x86/include/asm/kvm_host.h 	struct kvm_pmu pmu;
pmu                52 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
pmu                53 arch/x86/kvm/pmu.c 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
pmu                63 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
pmu                66 arch/x86/kvm/pmu.c 			      (unsigned long *)&pmu->reprogram_pmi)) {
pmu                67 arch/x86/kvm/pmu.c 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
pmu                77 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
pmu                80 arch/x86/kvm/pmu.c 			      (unsigned long *)&pmu->reprogram_pmi)) {
pmu                81 arch/x86/kvm/pmu.c 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
pmu               235 arch/x86/kvm/pmu.c void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
pmu               237 arch/x86/kvm/pmu.c 	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
pmu               246 arch/x86/kvm/pmu.c 		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);
pmu               255 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               259 arch/x86/kvm/pmu.c 	bitmask = pmu->reprogram_pmi;
pmu               262 arch/x86/kvm/pmu.c 		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
pmu               265 arch/x86/kvm/pmu.c 			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
pmu               269 arch/x86/kvm/pmu.c 		reprogram_counter(pmu, bit);
pmu               316 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               320 arch/x86/kvm/pmu.c 	if (!pmu->version)
pmu               366 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               368 arch/x86/kvm/pmu.c 	irq_work_sync(&pmu->irq_work);
pmu               374 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               376 arch/x86/kvm/pmu.c 	memset(pmu, 0, sizeof(*pmu));
pmu               378 arch/x86/kvm/pmu.c 	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
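Note: the kvm/pmu.c hits above funnel counter reprogramming through a pending bitmap: overflow paths queue work with test_and_set_bit() so each counter is marked at most once, and kvm_pmu_handle_event() later snapshots and drains the mask. A simplified sketch of that queue/drain pair (the in-tree drain clears bits in more than one place; here it is collapsed into the loop):

    #include <linux/bitops.h>
    #include <linux/types.h>

    struct toy_pmu {
        u64 reprogram_pmi;      /* one pending bit per counter */
        u64 global_status;
    };

    static void toy_queue_reprogram(struct toy_pmu *pmu, int idx)
    {
        /*
         * test_and_set_bit() makes requeueing a no-op, so an
         * overflow storm still yields one reprogram. The casts
         * mirror the in-tree code, which keeps the mask as a u64
         * but drives it with the bitop helpers.
         */
        if (!test_and_set_bit(idx, (unsigned long *)&pmu->reprogram_pmi))
            __set_bit(idx, (unsigned long *)&pmu->global_status);
    }

    static void toy_handle_event(struct toy_pmu *pmu,
                                 void (*reprogram)(struct toy_pmu *, int))
    {
        u64 bitmask = pmu->reprogram_pmi;   /* snapshot, then drain */
        int bit;

        for_each_set_bit(bit, (unsigned long *)&bitmask, 64) {
            clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
            reprogram(pmu, bit);
        }
    }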
pmu                 7 arch/x86/kvm/pmu.h #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
pmu                 8 arch/x86/kvm/pmu.h #define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
pmu                 9 arch/x86/kvm/pmu.h #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
pmu                25 arch/x86/kvm/pmu.h 	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
pmu                29 arch/x86/kvm/pmu.h 	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
pmu                43 arch/x86/kvm/pmu.h 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
pmu                45 arch/x86/kvm/pmu.h 	return pmu->counter_bitmask[pmc->type];
pmu                88 arch/x86/kvm/pmu.h static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
pmu                91 arch/x86/kvm/pmu.h 	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
pmu                93 arch/x86/kvm/pmu.h 					       pmu->nr_arch_gp_counters);
pmu                95 arch/x86/kvm/pmu.h 		return &pmu->gp_counters[index];
pmu               102 arch/x86/kvm/pmu.h static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
pmu               106 arch/x86/kvm/pmu.h 	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
pmu               108 arch/x86/kvm/pmu.h 					       pmu->nr_arch_fixed_counters);
pmu               110 arch/x86/kvm/pmu.h 		return &pmu->fixed_counters[index];
pmu               118 arch/x86/kvm/pmu.h void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
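Note: get_gp_pmc()/get_fixed_pmc() above map an MSR number to a counter slot with a base + range check; the elided middle line appears to clamp the computed index with array_index_nospec() before the array access. A reduced sketch of the lookup shape, with toy_ stand-in types and the speculation clamp omitted:

    #include <linux/types.h>

    struct toy_pmc {
        u64 counter;
    };

    struct toy_pmu {
        u32 nr_gp_counters;
        struct toy_pmc gp_counters[8];
    };

    /*
     * Return the counter backing @msr if it falls inside the
     * [base, base + nr_gp_counters) window, else NULL.
     */
    static struct toy_pmc *toy_get_gp_pmc(struct toy_pmu *pmu, u32 msr,
                                          u32 base)
    {
        if (msr >= base && msr < base + pmu->nr_gp_counters)
            return &pmu->gp_counters[msr - base];
        return NULL;
    }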
pmu                47 arch/x86/kvm/pmu_amd.c static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
pmu                49 arch/x86/kvm/pmu_amd.c 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
pmu                98 arch/x86/kvm/pmu_amd.c static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
pmu               126 arch/x86/kvm/pmu_amd.c 	return &pmu->gp_counters[msr_to_index(msr)];
pmu               129 arch/x86/kvm/pmu_amd.c static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
pmu               160 arch/x86/kvm/pmu_amd.c static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
pmu               162 arch/x86/kvm/pmu_amd.c 	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
pmu               163 arch/x86/kvm/pmu_amd.c 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
pmu               173 arch/x86/kvm/pmu_amd.c 	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
pmu               179 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               183 arch/x86/kvm/pmu_amd.c 	return (idx >= pmu->nr_arch_gp_counters);
pmu               189 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               193 arch/x86/kvm/pmu_amd.c 	if (idx >= pmu->nr_arch_gp_counters)
pmu               195 arch/x86/kvm/pmu_amd.c 	counters = pmu->gp_counters;
pmu               202 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               205 arch/x86/kvm/pmu_amd.c 	ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
pmu               206 arch/x86/kvm/pmu_amd.c 		get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
pmu               213 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               217 arch/x86/kvm/pmu_amd.c 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
pmu               223 arch/x86/kvm/pmu_amd.c 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
pmu               234 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               240 arch/x86/kvm/pmu_amd.c 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
pmu               246 arch/x86/kvm/pmu_amd.c 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
pmu               250 arch/x86/kvm/pmu_amd.c 		if (!(data & pmu->reserved_bits)) {
pmu               261 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               264 arch/x86/kvm/pmu_amd.c 		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
pmu               266 arch/x86/kvm/pmu_amd.c 		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
pmu               268 arch/x86/kvm/pmu_amd.c 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
pmu               269 arch/x86/kvm/pmu_amd.c 	pmu->reserved_bits = 0xffffffff00200000ull;
pmu               270 arch/x86/kvm/pmu_amd.c 	pmu->version = 1;
pmu               272 arch/x86/kvm/pmu_amd.c 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu               273 arch/x86/kvm/pmu_amd.c 	pmu->nr_arch_fixed_counters = 0;
pmu               274 arch/x86/kvm/pmu_amd.c 	pmu->global_status = 0;
pmu               279 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               285 arch/x86/kvm/pmu_amd.c 		pmu->gp_counters[i].type = KVM_PMC_GP;
pmu               286 arch/x86/kvm/pmu_amd.c 		pmu->gp_counters[i].vcpu = vcpu;
pmu               287 arch/x86/kvm/pmu_amd.c 		pmu->gp_counters[i].idx = i;
pmu               293 arch/x86/kvm/pmu_amd.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               297 arch/x86/kvm/pmu_amd.c 		struct kvm_pmc *pmc = &pmu->gp_counters[i];
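Note: pmu_amd.c above splits counter setup in two: init stamps every slot with its type, owning vcpu, and index once, while refresh fills in the CPUID-dependent limits (4 vs. 6 counters, 48-bit width). A sketch of the same two-phase setup with toy_ types; the owner back-pointer stands in for the vcpu:

    #include <linux/types.h>

    enum toy_pmc_type { TOY_PMC_GP };

    #define TOY_NUM_COUNTERS    6   /* AMD64_NUM_COUNTERS_CORE */

    struct toy_pmc {
        enum toy_pmc_type type;
        void *owner;                /* vcpu back-pointer stand-in */
        int idx;
    };

    struct toy_pmu {
        int nr_gp_counters;
        u64 counter_bitmask;
        struct toy_pmc gp_counters[TOY_NUM_COUNTERS];
    };

    static void toy_pmu_init(struct toy_pmu *pmu, void *owner)
    {
        int i;

        /* Stamp every slot once; limits come later in refresh. */
        for (i = 0; i < TOY_NUM_COUNTERS; i++) {
            pmu->gp_counters[i].type = TOY_PMC_GP;
            pmu->gp_counters[i].owner = owner;
            pmu->gp_counters[i].idx = i;
        }
    }

    static void toy_pmu_refresh(struct toy_pmu *pmu, bool has_perfctr_core)
    {
        pmu->nr_gp_counters = has_perfctr_core ? 6 : 4;
        pmu->counter_bitmask = ((u64)1 << 48) - 1;  /* 48-bit counters */
    }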
pmu                35 arch/x86/kvm/vmx/pmu_intel.c static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
pmu                39 arch/x86/kvm/vmx/pmu_intel.c 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
pmu                41 arch/x86/kvm/vmx/pmu_intel.c 		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
pmu                44 arch/x86/kvm/vmx/pmu_intel.c 		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
pmu                52 arch/x86/kvm/vmx/pmu_intel.c 	pmu->fixed_ctr_ctrl = data;
pmu                56 arch/x86/kvm/vmx/pmu_intel.c static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
pmu                59 arch/x86/kvm/vmx/pmu_intel.c 	u64 diff = pmu->global_ctrl ^ data;
pmu                61 arch/x86/kvm/vmx/pmu_intel.c 	pmu->global_ctrl = data;
pmu                64 arch/x86/kvm/vmx/pmu_intel.c 		reprogram_counter(pmu, bit);
pmu                67 arch/x86/kvm/vmx/pmu_intel.c static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
pmu                76 arch/x86/kvm/vmx/pmu_intel.c 		    && (pmu->available_event_types & (1 << i)))
pmu               100 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
pmu               102 arch/x86/kvm/vmx/pmu_intel.c 	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
pmu               105 arch/x86/kvm/vmx/pmu_intel.c static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
pmu               108 arch/x86/kvm/vmx/pmu_intel.c 		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
pmu               113 arch/x86/kvm/vmx/pmu_intel.c 		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
pmu               120 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               125 arch/x86/kvm/vmx/pmu_intel.c 	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
pmu               126 arch/x86/kvm/vmx/pmu_intel.c 		(fixed && idx >= pmu->nr_arch_fixed_counters);
pmu               132 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               139 arch/x86/kvm/vmx/pmu_intel.c 		counters = pmu->fixed_counters;
pmu               140 arch/x86/kvm/vmx/pmu_intel.c 		num_counters = pmu->nr_arch_fixed_counters;
pmu               142 arch/x86/kvm/vmx/pmu_intel.c 		counters = pmu->gp_counters;
pmu               143 arch/x86/kvm/vmx/pmu_intel.c 		num_counters = pmu->nr_arch_gp_counters;
pmu               147 arch/x86/kvm/vmx/pmu_intel.c 	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
pmu               153 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               161 arch/x86/kvm/vmx/pmu_intel.c 		ret = pmu->version > 1;
pmu               164 arch/x86/kvm/vmx/pmu_intel.c 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
pmu               165 arch/x86/kvm/vmx/pmu_intel.c 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
pmu               166 arch/x86/kvm/vmx/pmu_intel.c 			get_fixed_pmc(pmu, msr);
pmu               175 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               180 arch/x86/kvm/vmx/pmu_intel.c 		*data = pmu->fixed_ctr_ctrl;
pmu               183 arch/x86/kvm/vmx/pmu_intel.c 		*data = pmu->global_status;
pmu               186 arch/x86/kvm/vmx/pmu_intel.c 		*data = pmu->global_ctrl;
pmu               189 arch/x86/kvm/vmx/pmu_intel.c 		*data = pmu->global_ovf_ctrl;
pmu               192 arch/x86/kvm/vmx/pmu_intel.c 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
pmu               194 arch/x86/kvm/vmx/pmu_intel.c 			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
pmu               196 arch/x86/kvm/vmx/pmu_intel.c 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmu               198 arch/x86/kvm/vmx/pmu_intel.c 			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
pmu               200 arch/x86/kvm/vmx/pmu_intel.c 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
pmu               211 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               218 arch/x86/kvm/vmx/pmu_intel.c 		if (pmu->fixed_ctr_ctrl == data)
pmu               221 arch/x86/kvm/vmx/pmu_intel.c 			reprogram_fixed_counters(pmu, data);
pmu               227 arch/x86/kvm/vmx/pmu_intel.c 			pmu->global_status = data;
pmu               232 arch/x86/kvm/vmx/pmu_intel.c 		if (pmu->global_ctrl == data)
pmu               234 arch/x86/kvm/vmx/pmu_intel.c 		if (!(data & pmu->global_ctrl_mask)) {
pmu               235 arch/x86/kvm/vmx/pmu_intel.c 			global_ctrl_changed(pmu, data);
pmu               240 arch/x86/kvm/vmx/pmu_intel.c 		if (!(data & pmu->global_ovf_ctrl_mask)) {
pmu               242 arch/x86/kvm/vmx/pmu_intel.c 				pmu->global_status &= ~data;
pmu               243 arch/x86/kvm/vmx/pmu_intel.c 			pmu->global_ovf_ctrl = data;
pmu               248 arch/x86/kvm/vmx/pmu_intel.c 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
pmu               254 arch/x86/kvm/vmx/pmu_intel.c 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmu               257 arch/x86/kvm/vmx/pmu_intel.c 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
pmu               260 arch/x86/kvm/vmx/pmu_intel.c 			if (!(data & pmu->reserved_bits)) {
pmu               272 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               278 arch/x86/kvm/vmx/pmu_intel.c 	pmu->nr_arch_gp_counters = 0;
pmu               279 arch/x86/kvm/vmx/pmu_intel.c 	pmu->nr_arch_fixed_counters = 0;
pmu               280 arch/x86/kvm/vmx/pmu_intel.c 	pmu->counter_bitmask[KVM_PMC_GP] = 0;
pmu               281 arch/x86/kvm/vmx/pmu_intel.c 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu               282 arch/x86/kvm/vmx/pmu_intel.c 	pmu->version = 0;
pmu               283 arch/x86/kvm/vmx/pmu_intel.c 	pmu->reserved_bits = 0xffffffff00200000ull;
pmu               291 arch/x86/kvm/vmx/pmu_intel.c 	pmu->version = eax.split.version_id;
pmu               292 arch/x86/kvm/vmx/pmu_intel.c 	if (!pmu->version)
pmu               297 arch/x86/kvm/vmx/pmu_intel.c 	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
pmu               299 arch/x86/kvm/vmx/pmu_intel.c 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
pmu               300 arch/x86/kvm/vmx/pmu_intel.c 	pmu->available_event_types = ~entry->ebx &
pmu               303 arch/x86/kvm/vmx/pmu_intel.c 	if (pmu->version == 1) {
pmu               304 arch/x86/kvm/vmx/pmu_intel.c 		pmu->nr_arch_fixed_counters = 0;
pmu               306 arch/x86/kvm/vmx/pmu_intel.c 		pmu->nr_arch_fixed_counters =
pmu               309 arch/x86/kvm/vmx/pmu_intel.c 		pmu->counter_bitmask[KVM_PMC_FIXED] =
pmu               313 arch/x86/kvm/vmx/pmu_intel.c 	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
pmu               314 arch/x86/kvm/vmx/pmu_intel.c 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu               315 arch/x86/kvm/vmx/pmu_intel.c 	pmu->global_ctrl_mask = ~pmu->global_ctrl;
pmu               316 arch/x86/kvm/vmx/pmu_intel.c 	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
pmu               320 arch/x86/kvm/vmx/pmu_intel.c 		pmu->global_ovf_ctrl_mask &=
pmu               327 arch/x86/kvm/vmx/pmu_intel.c 		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
pmu               333 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               336 arch/x86/kvm/vmx/pmu_intel.c 		pmu->gp_counters[i].type = KVM_PMC_GP;
pmu               337 arch/x86/kvm/vmx/pmu_intel.c 		pmu->gp_counters[i].vcpu = vcpu;
pmu               338 arch/x86/kvm/vmx/pmu_intel.c 		pmu->gp_counters[i].idx = i;
pmu               342 arch/x86/kvm/vmx/pmu_intel.c 		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu               343 arch/x86/kvm/vmx/pmu_intel.c 		pmu->fixed_counters[i].vcpu = vcpu;
pmu               344 arch/x86/kvm/vmx/pmu_intel.c 		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
pmu               350 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
pmu               355 arch/x86/kvm/vmx/pmu_intel.c 		pmc = &pmu->gp_counters[i];
pmu               362 arch/x86/kvm/vmx/pmu_intel.c 		pmc = &pmu->fixed_counters[i];
pmu               368 arch/x86/kvm/vmx/pmu_intel.c 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
pmu               369 arch/x86/kvm/vmx/pmu_intel.c 		pmu->global_ovf_ctrl = 0;
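Note: global_ctrl_changed() above only touches counters whose enable bit actually flipped: XOR the cached MSR value against the new one and walk the set bits of the difference. A sketch of that diff walk, with the reprogram step reduced to a callback parameter:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define TOY_PMC_IDX_MAX 64

    struct toy_pmu {
        u64 global_ctrl;
    };

    static void toy_global_ctrl_changed(struct toy_pmu *pmu, u64 data,
                                        void (*reprogram)(struct toy_pmu *, int))
    {
        u64 diff = pmu->global_ctrl ^ data; /* only the flipped bits */
        int bit;

        pmu->global_ctrl = data;

        for_each_set_bit(bit, (unsigned long *)&diff, TOY_PMC_IDX_MAX)
            reprogram(pmu, bit);
    }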
pmu              7187 arch/x86/kvm/x86.c 			(unsigned long *)&vcpu->arch.pmu.global_status);
pmu               203 arch/x86/xen/pmu.c 	ctxt = &xenpmu_data->pmu.c.intel;
pmu               268 arch/x86/xen/pmu.c 	ctxt = &xenpmu_data->pmu.c.amd;
pmu               352 arch/x86/xen/pmu.c 	ctxt = &xenpmu_data->pmu.c.amd;
pmu               377 arch/x86/xen/pmu.c 	ctxt = &xenpmu_data->pmu.c.intel;
pmu               405 arch/x86/xen/pmu.c 	xenpmu_data->pmu.l.lapic_lvtpc = val;
pmu               440 arch/x86/xen/pmu.c 	if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV)
pmu               441 arch/x86/xen/pmu.c 		return (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER);
pmu               443 arch/x86/xen/pmu.c 		return !!(xenpmu_data->pmu.r.regs.cpl & 3);
pmu               455 arch/x86/xen/pmu.c 	return xenpmu_data->pmu.r.regs.ip;
pmu               499 arch/x86/xen/pmu.c 	xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
pmu               500 arch/x86/xen/pmu.c 			 xenpmu_data->pmu.pmu_flags);
pmu               188 arch/xtensa/kernel/perf_event.c static void xtensa_pmu_enable(struct pmu *pmu)
pmu               193 arch/xtensa/kernel/perf_event.c static void xtensa_pmu_disable(struct pmu *pmu)
pmu               393 arch/xtensa/kernel/perf_event.c static struct pmu xtensa_pmu = {
pmu               142 drivers/bcma/driver_chipcommon_pmu.c 	if (cc->pmu.rev >= 2)
pmu               277 drivers/bcma/driver_chipcommon_pmu.c 		cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU);
pmu               278 drivers/bcma/driver_chipcommon_pmu.c 		if (!cc->pmu.core)
pmu               281 drivers/bcma/driver_chipcommon_pmu.c 	if (!cc->pmu.core)
pmu               282 drivers/bcma/driver_chipcommon_pmu.c 		cc->pmu.core = cc->core;
pmu               285 drivers/bcma/driver_chipcommon_pmu.c 	cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
pmu               287 drivers/bcma/driver_chipcommon_pmu.c 	bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
pmu               293 drivers/bcma/driver_chipcommon_pmu.c 	if (cc->pmu.rev == 1)
pmu               341 drivers/bcma/driver_chipcommon_pmu.c 			  bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
pmu               441 drivers/bcma/driver_chipcommon_pmu.c 			  bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
pmu               456 drivers/bcma/driver_chipcommon_pmu.c 	if (cc->pmu.rev >= 5) {
pmu                38 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	struct pmu pmu;
pmu                51 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	if (event->attr.type != event->pmu->type)
pmu                64 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
pmu                66 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 						  pmu);
pmu                93 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
pmu                95 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 						  pmu);
pmu               120 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
pmu               122 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 						  pmu);
pmu               151 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
pmu               153 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 						  pmu);
pmu               179 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
pmu               181 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 						  pmu);
pmu               215 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	pmu_entry->pmu = (struct pmu){
pmu               225 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	pmu_entry->pmu.attr_groups = attr_groups;
pmu               230 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 	ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
pmu               276 drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c 			perf_pmu_unregister(&pe->pmu);
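Note: every amdgpu callback above recovers its driver context with the same container_of(event->pmu, ...) step, which only works because struct pmu is embedded by value in amdgpu_pmu_entry. A minimal sketch of the embed-and-recover pattern plus the matching dynamic-type registration, with toy_ stand-ins (a real PMU must also supply event_init/add/del/start/stop before registering):

    #include <linux/kernel.h>       /* container_of() */
    #include <linux/perf_event.h>

    struct toy_pmu_entry {
        struct pmu pmu;             /* embedded by value, not a pointer */
        void *priv;                 /* driver state the callbacks need */
    };

    static void toy_event_read(struct perf_event *event)
    {
        struct toy_pmu_entry *pe =
            container_of(event->pmu, struct toy_pmu_entry, pmu);

        /* pe->priv is reachable without any global lookup. */
        (void)pe;
    }

    static int toy_register(struct toy_pmu_entry *pe, const char *name)
    {
        pe->pmu = (struct pmu){
            .task_ctx_nr = perf_invalid_context,
            .read        = toy_event_read,
            /* mandatory callbacks omitted for brevity */
        };
        return perf_pmu_register(&pe->pmu, name, -1);   /* -1: dynamic id */
    }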
pmu               388 drivers/gpu/drm/i915/gt/intel_engine_types.h 	} pmu;
pmu              1752 drivers/gpu/drm/i915/i915_drv.h 	struct i915_pmu pmu;
pmu                80 drivers/gpu/drm/i915/i915_pmu.c static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
pmu                82 drivers/gpu/drm/i915/i915_pmu.c 	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
pmu                90 drivers/gpu/drm/i915/i915_pmu.c 	enable = pmu->enable;
pmu               121 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu               123 drivers/gpu/drm/i915/i915_pmu.c 	if (!pmu->base.event_init)
pmu               126 drivers/gpu/drm/i915/i915_pmu.c 	spin_lock_irq(&pmu->lock);
pmu               131 drivers/gpu/drm/i915/i915_pmu.c 	pmu->timer_enabled = pmu_needs_timer(pmu, false);
pmu               132 drivers/gpu/drm/i915/i915_pmu.c 	spin_unlock_irq(&pmu->lock);
pmu               135 drivers/gpu/drm/i915/i915_pmu.c static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
pmu               137 drivers/gpu/drm/i915/i915_pmu.c 	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
pmu               138 drivers/gpu/drm/i915/i915_pmu.c 		pmu->timer_enabled = true;
pmu               139 drivers/gpu/drm/i915/i915_pmu.c 		pmu->timer_last = ktime_get();
pmu               140 drivers/gpu/drm/i915/i915_pmu.c 		hrtimer_start_range_ns(&pmu->timer,
pmu               148 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu               150 drivers/gpu/drm/i915/i915_pmu.c 	if (!pmu->base.event_init)
pmu               153 drivers/gpu/drm/i915/i915_pmu.c 	spin_lock_irq(&pmu->lock);
pmu               157 drivers/gpu/drm/i915/i915_pmu.c 	__i915_pmu_maybe_start_timer(pmu);
pmu               158 drivers/gpu/drm/i915/i915_pmu.c 	spin_unlock_irq(&pmu->lock);
pmu               174 drivers/gpu/drm/i915/i915_pmu.c 	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
pmu               178 drivers/gpu/drm/i915/i915_pmu.c 		struct intel_engine_pmu *pmu = &engine->pmu;
pmu               193 drivers/gpu/drm/i915/i915_pmu.c 			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
pmu               195 drivers/gpu/drm/i915/i915_pmu.c 			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
pmu               210 drivers/gpu/drm/i915/i915_pmu.c 			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
pmu               229 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu               231 drivers/gpu/drm/i915/i915_pmu.c 	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
pmu               241 drivers/gpu/drm/i915/i915_pmu.c 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
pmu               246 drivers/gpu/drm/i915/i915_pmu.c 	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
pmu               247 drivers/gpu/drm/i915/i915_pmu.c 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
pmu               256 drivers/gpu/drm/i915/i915_pmu.c 		container_of(hrtimer, struct drm_i915_private, pmu.timer);
pmu               257 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu               262 drivers/gpu/drm/i915/i915_pmu.c 	if (!READ_ONCE(pmu->timer_enabled))
pmu               266 drivers/gpu/drm/i915/i915_pmu.c 	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
pmu               267 drivers/gpu/drm/i915/i915_pmu.c 	pmu->timer_last = now;
pmu               302 drivers/gpu/drm/i915/i915_pmu.c 		container_of(event->pmu, typeof(*i915), pmu.base);
pmu               372 drivers/gpu/drm/i915/i915_pmu.c 		container_of(event->pmu, typeof(*i915), pmu.base);
pmu               396 drivers/gpu/drm/i915/i915_pmu.c 		container_of(event->pmu, typeof(*i915), pmu.base);
pmu               399 drivers/gpu/drm/i915/i915_pmu.c 	if (event->attr.type != event->pmu->type)
pmu               453 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu               469 drivers/gpu/drm/i915/i915_pmu.c 		spin_lock_irqsave(&pmu->lock, flags);
pmu               471 drivers/gpu/drm/i915/i915_pmu.c 		if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
pmu               472 drivers/gpu/drm/i915/i915_pmu.c 			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
pmu               473 drivers/gpu/drm/i915/i915_pmu.c 			pmu->sample[__I915_SAMPLE_RC6].cur = val;
pmu               475 drivers/gpu/drm/i915/i915_pmu.c 			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
pmu               478 drivers/gpu/drm/i915/i915_pmu.c 		spin_unlock_irqrestore(&pmu->lock, flags);
pmu               489 drivers/gpu/drm/i915/i915_pmu.c 		spin_lock_irqsave(&pmu->lock, flags);
pmu               505 drivers/gpu/drm/i915/i915_pmu.c 			if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
pmu               506 drivers/gpu/drm/i915/i915_pmu.c 				pmu->suspended_time_last = val;
pmu               508 drivers/gpu/drm/i915/i915_pmu.c 			val -= pmu->suspended_time_last;
pmu               509 drivers/gpu/drm/i915/i915_pmu.c 			val += pmu->sample[__I915_SAMPLE_RC6].cur;
pmu               511 drivers/gpu/drm/i915/i915_pmu.c 			pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
pmu               512 drivers/gpu/drm/i915/i915_pmu.c 		} else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
pmu               513 drivers/gpu/drm/i915/i915_pmu.c 			val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
pmu               515 drivers/gpu/drm/i915/i915_pmu.c 			val = pmu->sample[__I915_SAMPLE_RC6].cur;
pmu               518 drivers/gpu/drm/i915/i915_pmu.c 		spin_unlock_irqrestore(&pmu->lock, flags);
pmu               530 drivers/gpu/drm/i915/i915_pmu.c 		container_of(event->pmu, typeof(*i915), pmu.base);
pmu               531 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu               548 drivers/gpu/drm/i915/i915_pmu.c 			val = engine->pmu.sample[sample].cur;
pmu               554 drivers/gpu/drm/i915/i915_pmu.c 			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
pmu               559 drivers/gpu/drm/i915/i915_pmu.c 			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
pmu               592 drivers/gpu/drm/i915/i915_pmu.c 		container_of(event->pmu, typeof(*i915), pmu.base);
pmu               594 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu               597 drivers/gpu/drm/i915/i915_pmu.c 	spin_lock_irqsave(&pmu->lock, flags);
pmu               603 drivers/gpu/drm/i915/i915_pmu.c 	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
pmu               604 drivers/gpu/drm/i915/i915_pmu.c 	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
pmu               605 drivers/gpu/drm/i915/i915_pmu.c 	GEM_BUG_ON(pmu->enable_count[bit] == ~0);
pmu               606 drivers/gpu/drm/i915/i915_pmu.c 	pmu->enable |= BIT_ULL(bit);
pmu               607 drivers/gpu/drm/i915/i915_pmu.c 	pmu->enable_count[bit]++;
pmu               612 drivers/gpu/drm/i915/i915_pmu.c 	__i915_pmu_maybe_start_timer(pmu);
pmu               626 drivers/gpu/drm/i915/i915_pmu.c 		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
pmu               628 drivers/gpu/drm/i915/i915_pmu.c 		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
pmu               630 drivers/gpu/drm/i915/i915_pmu.c 		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
pmu               631 drivers/gpu/drm/i915/i915_pmu.c 		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
pmu               632 drivers/gpu/drm/i915/i915_pmu.c 		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
pmu               634 drivers/gpu/drm/i915/i915_pmu.c 		engine->pmu.enable |= BIT(sample);
pmu               635 drivers/gpu/drm/i915/i915_pmu.c 		engine->pmu.enable_count[sample]++;
pmu               638 drivers/gpu/drm/i915/i915_pmu.c 	spin_unlock_irqrestore(&pmu->lock, flags);
pmu               651 drivers/gpu/drm/i915/i915_pmu.c 		container_of(event->pmu, typeof(*i915), pmu.base);
pmu               653 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu               656 drivers/gpu/drm/i915/i915_pmu.c 	spin_lock_irqsave(&pmu->lock, flags);
pmu               666 drivers/gpu/drm/i915/i915_pmu.c 		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
pmu               667 drivers/gpu/drm/i915/i915_pmu.c 		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
pmu               668 drivers/gpu/drm/i915/i915_pmu.c 		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
pmu               674 drivers/gpu/drm/i915/i915_pmu.c 		if (--engine->pmu.enable_count[sample] == 0)
pmu               675 drivers/gpu/drm/i915/i915_pmu.c 			engine->pmu.enable &= ~BIT(sample);
pmu               678 drivers/gpu/drm/i915/i915_pmu.c 	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
pmu               679 drivers/gpu/drm/i915/i915_pmu.c 	GEM_BUG_ON(pmu->enable_count[bit] == 0);
pmu               684 drivers/gpu/drm/i915/i915_pmu.c 	if (--pmu->enable_count[bit] == 0) {
pmu               685 drivers/gpu/drm/i915/i915_pmu.c 		pmu->enable &= ~BIT_ULL(bit);
pmu               686 drivers/gpu/drm/i915/i915_pmu.c 		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
pmu               689 drivers/gpu/drm/i915/i915_pmu.c 	spin_unlock_irqrestore(&pmu->lock, flags);
pmu               838 drivers/gpu/drm/i915/i915_pmu.c create_event_attributes(struct i915_pmu *pmu)
pmu               840 drivers/gpu/drm/i915/i915_pmu.c 	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
pmu               953 drivers/gpu/drm/i915/i915_pmu.c 	pmu->i915_attr = i915_attr;
pmu               954 drivers/gpu/drm/i915/i915_pmu.c 	pmu->pmu_attr = pmu_attr;
pmu               970 drivers/gpu/drm/i915/i915_pmu.c static void free_event_attributes(struct i915_pmu *pmu)
pmu               978 drivers/gpu/drm/i915/i915_pmu.c 	kfree(pmu->i915_attr);
pmu               979 drivers/gpu/drm/i915/i915_pmu.c 	kfree(pmu->pmu_attr);
pmu               982 drivers/gpu/drm/i915/i915_pmu.c 	pmu->i915_attr = NULL;
pmu               983 drivers/gpu/drm/i915/i915_pmu.c 	pmu->pmu_attr = NULL;
pmu               988 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
pmu               990 drivers/gpu/drm/i915/i915_pmu.c 	GEM_BUG_ON(!pmu->base.event_init);
pmu              1001 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
pmu              1004 drivers/gpu/drm/i915/i915_pmu.c 	GEM_BUG_ON(!pmu->base.event_init);
pmu              1011 drivers/gpu/drm/i915/i915_pmu.c 			perf_pmu_migrate_context(&pmu->base, cpu, target);
pmu              1020 drivers/gpu/drm/i915/i915_pmu.c static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
pmu              1033 drivers/gpu/drm/i915/i915_pmu.c 	ret = cpuhp_state_add_instance(slot, &pmu->node);
pmu              1043 drivers/gpu/drm/i915/i915_pmu.c static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
pmu              1046 drivers/gpu/drm/i915/i915_pmu.c 	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
pmu              1052 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu              1060 drivers/gpu/drm/i915/i915_pmu.c 	i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
pmu              1066 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.attr_groups	= i915_pmu_attr_groups;
pmu              1067 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.task_ctx_nr	= perf_invalid_context;
pmu              1068 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.event_init	= i915_pmu_event_init;
pmu              1069 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.add		= i915_pmu_event_add;
pmu              1070 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.del		= i915_pmu_event_del;
pmu              1071 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.start		= i915_pmu_event_start;
pmu              1072 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.stop		= i915_pmu_event_stop;
pmu              1073 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.read		= i915_pmu_event_read;
pmu              1074 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.event_idx	= i915_pmu_event_event_idx;
pmu              1076 drivers/gpu/drm/i915/i915_pmu.c 	spin_lock_init(&pmu->lock);
pmu              1077 drivers/gpu/drm/i915/i915_pmu.c 	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu              1078 drivers/gpu/drm/i915/i915_pmu.c 	pmu->timer.function = i915_sample;
pmu              1080 drivers/gpu/drm/i915/i915_pmu.c 	ret = perf_pmu_register(&pmu->base, "i915", -1);
pmu              1084 drivers/gpu/drm/i915/i915_pmu.c 	ret = i915_pmu_register_cpuhp_state(pmu);
pmu              1091 drivers/gpu/drm/i915/i915_pmu.c 	perf_pmu_unregister(&pmu->base);
pmu              1093 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.event_init = NULL;
pmu              1094 drivers/gpu/drm/i915/i915_pmu.c 	free_event_attributes(pmu);
pmu              1100 drivers/gpu/drm/i915/i915_pmu.c 	struct i915_pmu *pmu = &i915->pmu;
pmu              1102 drivers/gpu/drm/i915/i915_pmu.c 	if (!pmu->base.event_init)
pmu              1105 drivers/gpu/drm/i915/i915_pmu.c 	WARN_ON(pmu->enable);
pmu              1107 drivers/gpu/drm/i915/i915_pmu.c 	hrtimer_cancel(&pmu->timer);
pmu              1109 drivers/gpu/drm/i915/i915_pmu.c 	i915_pmu_unregister_cpuhp_state(pmu);
pmu              1111 drivers/gpu/drm/i915/i915_pmu.c 	perf_pmu_unregister(&pmu->base);
pmu              1112 drivers/gpu/drm/i915/i915_pmu.c 	pmu->base.event_init = NULL;
pmu              1113 drivers/gpu/drm/i915/i915_pmu.c 	free_event_attributes(pmu);
pmu                48 drivers/gpu/drm/i915/i915_pmu.h 	struct pmu base;
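Note: the i915 block above shows an hrtimer-driven sampling PMU end to end: lock and timer initialised before perf_pmu_register(), the timer armed only while some sampled event is enabled, and base.event_init reset to NULL on failure so later paths can detect the half-initialised state. A compressed sketch of the timer wiring, with toy_ names and an arbitrary 1 ms period:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/perf_event.h>
    #include <linux/spinlock.h>

    #define TOY_PERIOD_NS   (1000 * 1000)   /* 1 ms, an arbitrary choice */

    struct toy_pmu {
        struct pmu base;
        spinlock_t lock;
        struct hrtimer timer;
        bool timer_enabled;
    };

    static enum hrtimer_restart toy_sample(struct hrtimer *hrtimer)
    {
        struct toy_pmu *pmu = container_of(hrtimer, struct toy_pmu, timer);

        /* Stop rearming once every sampled event is disabled. */
        if (!READ_ONCE(pmu->timer_enabled))
            return HRTIMER_NORESTART;

        /* ... accumulate engine/frequency samples here ... */

        hrtimer_forward_now(hrtimer, ns_to_ktime(TOY_PERIOD_NS));
        return HRTIMER_RESTART;
    }

    static void toy_pmu_setup(struct toy_pmu *pmu)
    {
        spin_lock_init(&pmu->lock);
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = toy_sample;
        /*
         * perf_pmu_register(&pmu->base, "toy", -1) would follow;
         * on failure, base.event_init is set back to NULL so other
         * paths can tell the PMU never came up.
         */
    }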
pmu                47 drivers/gpu/drm/lima/lima_device.c 	LIMA_IP_DESC(pmu,         false, false, 0x02000, 0x02000, pmu,      "pmu"),
pmu               151 drivers/gpu/drm/nouveau/include/nvkm/core/device.h 	struct nvkm_pmu *pmu;
pmu               224 drivers/gpu/drm/nouveau/include/nvkm/core/device.h 	int (*pmu     )(struct nvkm_device *, int idx, struct nvkm_pmu **);
pmu              1158 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gt215_pmu_new,
pmu              1192 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gt215_pmu_new,
pmu              1225 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gt215_pmu_new,
pmu              1322 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gt215_pmu_new,
pmu              1358 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gf100_pmu_new,
pmu              1395 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gf100_pmu_new,
pmu              1431 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gf100_pmu_new,
pmu              1467 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gf100_pmu_new,
pmu              1504 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gf100_pmu_new,
pmu              1541 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gf100_pmu_new,
pmu              1578 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gf100_pmu_new,
pmu              1649 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gf119_pmu_new,
pmu              1685 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gk104_pmu_new,
pmu              1724 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gk104_pmu_new,
pmu              1763 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gk104_pmu_new,
pmu              1795 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gk20a_pmu_new,
pmu              1827 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gk110_pmu_new,
pmu              1865 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gk110_pmu_new,
pmu              1903 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gk208_pmu_new,
pmu              1941 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gk208_pmu_new,
pmu              1979 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gm107_pmu_new,
pmu              2013 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gm107_pmu_new,
pmu              2046 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gm107_pmu_new,
pmu              2081 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gm107_pmu_new,
pmu              2116 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gm107_pmu_new,
pmu              2145 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gm20b_pmu_new,
pmu              2177 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp100_pmu_new,
pmu              2213 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2249 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2285 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2321 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2357 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2387 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gm20b_pmu_new,
pmu              2416 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2457 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2492 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2527 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2562 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2597 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	.pmu = gp102_pmu_new,
pmu              2660 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 	_(PMU     , device->pmu     , &device->pmu->subdev);
pmu              3166 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 		_(NVKM_SUBDEV_PMU     ,      pmu);
pmu              1956 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 	nvkm_pmu_pgob(device->pmu, false);
pmu              2019 drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c 	nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
pmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
pmu                38 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 	nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
pmu                41 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 			nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);
pmu                52 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)
pmu                58 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 	nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);
pmu                87 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 	struct nvbios_pmuR pmu;
pmu                89 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 	if (!nvbios_pmuRm(bios, type, &pmu))
pmu                95 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 	pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
pmu                96 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 	pmu_code(init, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
pmu                97 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 	pmu_data(init, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
pmu               100 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 		*init_addr_pmu = pmu.init_addr_pmu;
pmu               101 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 		*args_addr_pmu = pmu.args_addr_pmu;
pmu               105 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 	return pmu_exec(init, pmu.init_addr_pmu), 0;
pmu               143 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 		u32 pmu = pmu_args(init, args + 0x08, 0x08);
pmu               146 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 		pmu_data(init, pmu, img, len);
pmu               151 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 		u32 pmu = pmu_args(init, args + 0x08, 0x10);
pmu               154 drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c 		pmu_data(init, pmu, img, len);
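Note: pmu_code()/pmu_data() above stream a firmware image into the PMU falcon through an index/data register pair: a single control write selects the destination offset and (via bit 24) arms auto-increment, after which consecutive data writes walk through IMEM or DMEM. A generic sketch of the idiom; wr32 and the register pair are parameters here (the +4 data port is an assumption), and the per-256-byte IMEM tag writes visible above are omitted:

    #include <linux/types.h>

    static void toy_stream_upload(void (*wr32)(u32 addr, u32 val),
                                  u32 ctrl, u32 data,
                                  u32 dst, const u32 *img, u32 len)
    {
        u32 i;

        /* Bit 24 arms auto-increment, so one control write
         * covers the whole transfer. */
        wr32(ctrl, 0x01000000 | dst);
        for (i = 0; i < len; i += 4)
            wr32(data, img[i / 4]);
    }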
pmu                61 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h 	int ret = nvkm_memx_init(fb->subdev.device->pmu, &ram->memx);
pmu               149 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h 	return nvkm_memx_train_result(fb->subdev.device->pmu, result, rsize);
pmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu = device->pmu;
pmu                37 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (pmu && pmu->func->code.size)
pmu                48 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
pmu                50 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (pmu && pmu->func->pgob)
pmu                51 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 		pmu->func->pgob(pmu, enable);
pmu                57 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
pmu                58 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	return pmu->func->recv(pmu);
pmu                62 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
pmu                65 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (!pmu || !pmu->func->send)
pmu                67 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	return pmu->func->send(pmu, reply, process, message, data0, data1);
pmu                73 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
pmu                74 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (!pmu->func->intr)
pmu                76 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	pmu->func->intr(pmu);
pmu                82 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
pmu                84 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (pmu->func->fini)
pmu                85 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 		pmu->func->fini(pmu);
pmu                87 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	flush_work(&pmu->recv.work);
pmu                92 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c nvkm_pmu_reset(struct nvkm_pmu *pmu)
pmu                94 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_device *device = pmu->subdev.device;
pmu                96 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (!pmu->func->enabled(pmu))
pmu               107 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (pmu->func->reset)
pmu               108 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 		pmu->func->reset(pmu);
pmu               122 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
pmu               123 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	return nvkm_pmu_reset(pmu);
pmu               129 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
pmu               130 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	int ret = nvkm_pmu_reset(pmu);
pmu               131 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (ret == 0 && pmu->func->init)
pmu               132 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 		ret = pmu->func->init(pmu);
pmu               139 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
pmu               140 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
pmu               146 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
pmu               147 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	nvkm_msgqueue_del(&pmu->queue);
pmu               148 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	nvkm_falcon_del(&pmu->falcon);
pmu               164 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	      int index, struct nvkm_pmu *pmu)
pmu               166 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
pmu               167 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	pmu->func = func;
pmu               168 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
pmu               169 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	init_waitqueue_head(&pmu->recv.wait);
pmu               177 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	struct nvkm_pmu *pmu;
pmu               178 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c 	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
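Note: nvkm's base.c above routes every entry point through a per-chip function table: the common object carries a const func pointer, each wrapper null-checks the hook before calling it (so newer chips only fill in what they have), and the constructor allocates and publishes the object in one kzalloc() expression. A sketch of that dispatch shape with toy_ types:

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct toy_pmu;

    struct toy_pmu_func {
        void (*intr)(struct toy_pmu *);     /* optional per-chip hooks */
        void (*reset)(struct toy_pmu *);
    };

    struct toy_pmu {
        const struct toy_pmu_func *func;
    };

    static void toy_pmu_intr(struct toy_pmu *pmu)
    {
        /* Hooks are optional: null-check before dispatch. */
        if (pmu->func->intr)
            pmu->func->intr(pmu);
    }

    static int toy_pmu_new(const struct toy_pmu_func *func,
                           struct toy_pmu **ppmu)
    {
        struct toy_pmu *pmu;

        /* Allocate and publish in one expression, as above. */
        if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
            return -ENOMEM;
        pmu->func = func;
        return 0;
    }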
pmu                30 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c gf100_pmu_reset(struct nvkm_pmu *pmu)
pmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c 	struct nvkm_device *device = pmu->subdev.device;
pmu                38 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c gf100_pmu_enabled(struct nvkm_pmu *pmu)
pmu                40 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf100.c 	return nvkm_mc_enabled(pmu->subdev.device, NVKM_SUBDEV_PMU);
pmu                57 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
pmu                59 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c 	struct nvkm_device *device = pmu->subdev.device;
pmu                32 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c gk110_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
pmu                34 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk110.c 	struct nvkm_device *device = pmu->subdev.device;
pmu                51 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
pmu                53 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
pmu                59 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
pmu                61 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
pmu                67 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
pmu                70 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct gk20a_pmu_dvfs_data *data = pmu->data;
pmu                71 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_clk *clk = pmu->base.subdev.device->clk;
pmu                86 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
pmu                95 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
pmu                98 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_falcon *falcon = pmu->base.falcon;
pmu               105 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c gk20a_pmu_dvfs_reset_dev_status(struct gk20a_pmu *pmu)
pmu               107 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_falcon *falcon = pmu->base.falcon;
pmu               116 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct gk20a_pmu *pmu =
pmu               118 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct gk20a_pmu_dvfs_data *data = pmu->data;
pmu               120 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_subdev *subdev = &pmu->base.subdev;
pmu               135 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	gk20a_pmu_dvfs_get_dev_status(pmu, &status);
pmu               145 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	gk20a_pmu_dvfs_get_cur_state(pmu, &state);
pmu               147 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	if (gk20a_pmu_dvfs_get_target_state(pmu, &state, data->avg_load)) {
pmu               149 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 		gk20a_pmu_dvfs_target(pmu, &state);
pmu               153 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	gk20a_pmu_dvfs_reset_dev_status(pmu);
pmu               158 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c gk20a_pmu_fini(struct nvkm_pmu *pmu)
pmu               160 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
pmu               161 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	nvkm_timer_alarm(pmu->subdev.device->timer, 0, &gpmu->alarm);
pmu               163 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	nvkm_falcon_put(pmu->falcon, &pmu->subdev);
pmu               167 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c gk20a_pmu_init(struct nvkm_pmu *pmu)
pmu               169 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct gk20a_pmu *gpmu = gk20a_pmu(pmu);
pmu               170 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_subdev *subdev = &pmu->subdev;
pmu               171 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_device *device = pmu->subdev.device;
pmu               172 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct nvkm_falcon *falcon = pmu->falcon;
pmu               208 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	struct gk20a_pmu *pmu;
pmu               210 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	if (!(pmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
pmu               212 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	*ppmu = &pmu->base;
pmu               214 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	nvkm_pmu_ctor(&gk20a_pmu, device, index, &pmu->base);
pmu               216 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	pmu->data = &gk20a_dvfs_data;
pmu               217 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c 	nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
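The gk20a.c entries above form a timer-driven DVFS loop: the alarm work item samples the falcon's load counters (get_dev_status), derives an average load, reads the current clock state, asks get_target_state whether a different level is warranted, retargets the clock if so, and resets the counters for the next period. Below is a minimal user-space sketch of that decision step; the threshold names and values are assumptions for illustration (the driver's real tuning lives in gk20a_pmu_dvfs_data, which is not shown in this listing).

#include <stdio.h>

struct dvfs_data {
	int up_threshold;	/* % load above which we step up (assumed) */
	int down_threshold;	/* % load below which we step down (assumed) */
	int max_state;
};

static int dvfs_get_target_state(const struct dvfs_data *d,
				 int cur_state, int avg_load)
{
	int state = cur_state;

	if (avg_load > d->up_threshold && state < d->max_state)
		state++;
	else if (avg_load < d->down_threshold && state > 0)
		state--;

	return state;	/* caller compares with cur_state, as the driver does */
}

int main(void)
{
	struct dvfs_data d = { .up_threshold = 85, .down_threshold = 30,
			       .max_state = 7 };
	int state = 3;
	int samples[] = { 90, 95, 20, 10, 50 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int next = dvfs_get_target_state(&d, state, samples[i]);
		printf("load=%d%%: level %d -> %d\n", samples[i], state, next);
		state = next;
	}
	return 0;
}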
pmu                28 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c gm20b_pmu_recv(struct nvkm_pmu *pmu)
pmu                30 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c 	if (!pmu->queue) {
pmu                31 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c 		nvkm_warn(&pmu->subdev,
pmu                36 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c 	nvkm_msgqueue_recv(pmu->queue);
pmu                27 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c gp102_pmu_reset(struct nvkm_pmu *pmu)
pmu                29 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c 	struct nvkm_device *device = pmu->subdev.device;
pmu                35 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c gp102_pmu_enabled(struct nvkm_pmu *pmu)
pmu                37 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c 	return !(nvkm_rd32(pmu->subdev.device, 0x10a3c0) & 0x00000001);
pmu                30 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
pmu                33 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	struct nvkm_subdev *subdev = &pmu->subdev;
pmu                54 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		pmu->recv.message = message;
pmu                55 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		pmu->recv.process = process;
pmu                65 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 				pmu->send.base));
pmu                77 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		wait_event(pmu->recv.wait, (pmu->recv.process == 0));
pmu                78 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		reply[0] = pmu->recv.data[0];
pmu                79 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		reply[1] = pmu->recv.data[1];
pmu                87 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c gt215_pmu_recv(struct nvkm_pmu *pmu)
pmu                89 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	struct nvkm_subdev *subdev = &pmu->subdev;
pmu               105 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 				pmu->recv.base));
pmu               116 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	if (pmu->recv.process) {
pmu               117 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		if (process == pmu->recv.process &&
pmu               118 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		    message == pmu->recv.message) {
pmu               119 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 			pmu->recv.data[0] = data0;
pmu               120 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 			pmu->recv.data[1] = data1;
pmu               121 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 			pmu->recv.process = 0;
pmu               122 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 			wake_up(&pmu->recv.wait);
pmu               139 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c gt215_pmu_intr(struct nvkm_pmu *pmu)
pmu               141 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	struct nvkm_subdev *subdev = &pmu->subdev;
pmu               158 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		schedule_work(&pmu->recv.work);
pmu               178 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c gt215_pmu_fini(struct nvkm_pmu *pmu)
pmu               180 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	nvkm_wr32(pmu->subdev.device, 0x10a014, 0x00000060);
pmu               184 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c gt215_pmu_reset(struct nvkm_pmu *pmu)
pmu               186 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	struct nvkm_device *device = pmu->subdev.device;
pmu               193 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c gt215_pmu_enabled(struct nvkm_pmu *pmu)
pmu               195 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	return nvkm_rd32(pmu->subdev.device, 0x022210) & 0x00000001;
pmu               199 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c gt215_pmu_init(struct nvkm_pmu *pmu)
pmu               201 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	struct nvkm_device *device = pmu->subdev.device;
pmu               206 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	for (i = 0; i < pmu->func->data.size / 4; i++)
pmu               207 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		nvkm_wr32(device, 0x10a1c4, pmu->func->data.data[i]);
pmu               211 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	for (i = 0; i < pmu->func->code.size / 4; i++) {
pmu               214 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 		nvkm_wr32(device, 0x10a184, pmu->func->code.data[i]);
pmu               228 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	pmu->send.base = nvkm_rd32(device, 0x10a4d0) & 0x0000ffff;
pmu               229 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	pmu->send.size = nvkm_rd32(device, 0x10a4d0) >> 16;
pmu               237 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	pmu->recv.base = nvkm_rd32(device, 0x10a4dc) & 0x0000ffff;
pmu               238 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gt215.c 	pmu->recv.size = nvkm_rd32(device, 0x10a4dc) >> 16;
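The gt215.c send/recv pair above is a classic mailbox handshake: gt215_pmu_send records the expected (process, message) pair and sleeps in wait_event() until recv.process drops back to zero, while the interrupt-driven gt215_pmu_recv matches the reply against that pair, copies the two data words, clears recv.process and calls wake_up(). The pthread condition-variable model below sketches only that synchronization shape, not the falcon mailbox registers or message framing.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct recv_slot {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	uint32_t process, message;	/* nonzero process == reply pending */
	uint32_t data[2];
};

static struct recv_slot slot = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait = PTHREAD_COND_INITIALIZER,
};

static void *receiver(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&slot.lock);
	if (slot.process) {		/* matches gt215_pmu_recv() */
		slot.data[0] = 0xcafe;	/* reply payload, made up here */
		slot.data[1] = 0xf00d;
		slot.process = 0;
		pthread_cond_signal(&slot.wait);
	}
	pthread_mutex_unlock(&slot.lock);
	return NULL;
}

int main(void)
{
	pthread_t rx;

	pthread_mutex_lock(&slot.lock);
	slot.process = 1;		/* mark an outstanding request */
	slot.message = 42;
	pthread_mutex_unlock(&slot.lock);

	pthread_create(&rx, NULL, receiver, NULL);

	pthread_mutex_lock(&slot.lock);
	while (slot.process)		/* wait_event(..., process == 0) */
		pthread_cond_wait(&slot.wait, &slot.lock);
	printf("reply: %#x %#x\n", slot.data[0], slot.data[1]);
	pthread_mutex_unlock(&slot.lock);

	pthread_join(rx, NULL);
	return 0;
}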
pmu                 7 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	struct nvkm_pmu *pmu;
pmu                20 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	struct nvkm_device *device = memx->pmu->subdev.device;
pmu                44 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
pmu                46 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	struct nvkm_device *device = pmu->subdev.device;
pmu                51 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
pmu                59 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	memx->pmu = pmu;
pmu                75 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	struct nvkm_pmu *pmu = memx->pmu;
pmu                76 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	struct nvkm_subdev *subdev = &pmu->subdev;
pmu                89 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
pmu               102 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	nvkm_debug(&memx->pmu->subdev, "R[%06x] = %08x\n", addr, data);
pmu               110 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d us\n",
pmu               119 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	nvkm_debug(&memx->pmu->subdev, "    DELAY = %d ns\n", nsec);
pmu               127 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	struct nvkm_subdev *subdev = &memx->pmu->subdev;
pmu               161 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	nvkm_debug(&memx->pmu->subdev, "   MEM TRAIN\n");
pmu               166 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c nvkm_memx_train_result(struct nvkm_pmu *pmu, u32 *res, int rsize)
pmu               168 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	struct nvkm_device *device = pmu->subdev.device;
pmu               172 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
pmu               194 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	nvkm_debug(&memx->pmu->subdev, "   HOST BLOCKED\n");
pmu               201 drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c 	nvkm_debug(&memx->pmu->subdev, "   HOST UNBLOCKED\n");
pmu              1021 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 		queue = sb->subdev.device->pmu->queue;
pmu              1143 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
pmu              1153 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c 	addr_args = pmu->falcon->data.limit;
pmu               121 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c 	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
pmu               129 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c 	addr_args = pmu->falcon->data.limit;
pmu                35 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c 	const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
pmu                43 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c 	addr_args = pmu->falcon->data.limit;
pmu               136 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c 		sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
pmu               142 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c 		sb->halt_falcon = subdev->device->pmu->falcon;
pmu               105 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	struct nvkm_pmu *pmu = sb->subdev.device->pmu;
pmu               113 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
pmu               114 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 				sb, &pmu->queue);
pmu               125 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	struct nvkm_pmu *pmu = device->pmu;
pmu               126 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
pmu               129 drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c 	ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
pmu                24 drivers/hwtracing/coresight/coresight-etm-perf.c static struct pmu etm_pmu;
pmu                30 drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c 	if (regmap_read(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, &reg)) {
pmu                37 drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c 	if (regmap_write(cec->pmu, EXYNOS_HDMI_PHY_CONTROL, reg)) {
pmu               207 drivers/media/platform/s5p-cec/s5p_cec.c 	cec->pmu = syscon_regmap_lookup_by_phandle(dev->of_node,
pmu               209 drivers/media/platform/s5p-cec/s5p_cec.c 	if (IS_ERR(cec->pmu))
pmu                66 drivers/media/platform/s5p-cec/s5p_cec.h 	struct regmap           *pmu;
pmu               544 drivers/net/wireless/broadcom/b43/phy_lp.c 	crystalfreq = bus->chipco.pmu.crystalfreq * 1000;
pmu              1275 drivers/net/wireless/broadcom/b43/phy_lp.c 	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
pmu              2392 drivers/net/wireless/broadcom/b43/phy_lp.c 	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
pmu              2483 drivers/net/wireless/broadcom/b43/phy_lp.c 	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
pmu              1022 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 	struct brcmf_core *pmu;
pmu              1039 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 	pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */
pmu              1042 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 					CORE_CC_REG(pmu->base, pmucapabilities));
pmu              1144 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 	struct brcmf_core *pmu;
pmu              1149 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU);
pmu              1150 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		if (pmu)
pmu              1151 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 			return pmu;
pmu              1328 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 	struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);
pmu              1350 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
pmu              1352 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		addr = CORE_CC_REG(pmu->base, chipcontrol_data);
pmu              1365 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		addr = CORE_CC_REG(pmu->base, retention_ctl);
pmu              1370 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
pmu              1375 drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c 		addr = CORE_CC_REG(pmu->base, retention_ctl);
pmu               553 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c #define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
pmu              3770 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		struct brcmf_core *pmu = brcmf_chip_get_pmu(ci);
pmu              3778 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c 		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
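SDIOD_DRVSTR_KEY() in sdio.c above packs a chip id and a PMU revision into a single lookup key, chip in the high half-word and pmu revision in the low. A compilable check of that packing; the chip and revision values below are arbitrary examples, not taken from the driver's tables.

#include <stdint.h>
#include <stdio.h>

#define SDIOD_DRVSTR_KEY(chip, pmu)	(((chip) << 16) | (pmu))

int main(void)
{
	uint32_t key = SDIOD_DRVSTR_KEY(0x4339, 17);

	printf("key=%#x chip=%#x pmurev=%u\n",
	       key, key >> 16, key & 0xffff);
	return 0;
}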
pmu               102 drivers/perf/arm-cci.c 	struct pmu pmu;
pmu               115 drivers/perf/arm-cci.c #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
pmu               801 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu               819 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu               877 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu              1086 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu              1096 drivers/perf/arm-cci.c static void cci_pmu_enable(struct pmu *pmu)
pmu              1098 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
pmu              1112 drivers/perf/arm-cci.c static void cci_pmu_disable(struct pmu *pmu)
pmu              1114 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
pmu              1135 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu              1169 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu              1192 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu              1217 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu              1229 drivers/perf/arm-cci.c static int validate_event(struct pmu *cci_pmu,
pmu              1241 drivers/perf/arm-cci.c 	if (event->pmu != cci_pmu)
pmu              1256 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu              1267 drivers/perf/arm-cci.c 	if (!validate_event(event->pmu, &fake_pmu, leader))
pmu              1271 drivers/perf/arm-cci.c 		if (!validate_event(event->pmu, &fake_pmu, sibling))
pmu              1275 drivers/perf/arm-cci.c 	if (!validate_event(event->pmu, &fake_pmu, event))
pmu              1319 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
pmu              1323 drivers/perf/arm-cci.c 	if (event->attr.type != event->pmu->type)
pmu              1365 drivers/perf/arm-cci.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu              1366 drivers/perf/arm-cci.c 	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
pmu              1414 drivers/perf/arm-cci.c 	cci_pmu->pmu = (struct pmu) {
pmu              1441 drivers/perf/arm-cci.c 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
pmu              1455 drivers/perf/arm-cci.c 	perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
pmu              1713 drivers/perf/arm-cci.c 	perf_pmu_unregister(&g_cci_pmu->pmu);
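arm-cci.c shows the embedding idiom used by nearly every driver in this listing: the device-private struct contains a struct pmu member, the perf core only ever sees a struct pmu *, and to_cci_pmu() recovers the wrapper with container_of(). A self-contained user-space demonstration follows; the wrapper's fields are simplified stand-ins, not the real struct cci_pmu layout.

#include <stddef.h>
#include <stdio.h>

/* user-space rendition of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu { const char *name; };

struct cci_pmu_model {		/* stand-in for struct cci_pmu */
	int num_cntrs;
	struct pmu pmu;		/* what perf_pmu_register() sees */
};

#define to_cci(p) container_of(p, struct cci_pmu_model, pmu)

int main(void)
{
	struct cci_pmu_model cci = { .num_cntrs = 8, .pmu = { "cci" } };
	struct pmu *p = &cci.pmu;	/* the core hands this back */

	printf("%s has %d counters\n", p->name, to_cci(p)->num_cntrs);
	return 0;
}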
pmu               143 drivers/perf/arm-ccn.c 	struct arm_ccn_dt, pmu), struct arm_ccn, dt)
pmu               166 drivers/perf/arm-ccn.c 	struct pmu pmu;
pmu               638 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu               696 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu               727 drivers/perf/arm-ccn.c 	if (event->attr.type != event->pmu->type)
pmu               730 drivers/perf/arm-ccn.c 	ccn = pmu_to_arm_ccn(event->pmu);
pmu               836 drivers/perf/arm-ccn.c 	if (event->group_leader->pmu != event->pmu &&
pmu               841 drivers/perf/arm-ccn.c 		if (sibling->pmu != event->pmu &&
pmu               875 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu               891 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu               924 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu               950 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu               998 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu              1019 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu              1056 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu              1102 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu              1129 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
pmu              1144 drivers/perf/arm-ccn.c static void arm_ccn_pmu_enable(struct pmu *pmu)
pmu              1146 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
pmu              1153 drivers/perf/arm-ccn.c static void arm_ccn_pmu_disable(struct pmu *pmu)
pmu              1155 drivers/perf/arm-ccn.c 	struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
pmu              1216 drivers/perf/arm-ccn.c 	perf_pmu_migrate_context(&dt->pmu, cpu, target);
pmu              1271 drivers/perf/arm-ccn.c 	ccn->dt.pmu = (struct pmu) {
pmu              1309 drivers/perf/arm-ccn.c 	err = perf_pmu_register(&ccn->dt.pmu, name, -1);
pmu              1338 drivers/perf/arm-ccn.c 	perf_pmu_unregister(&ccn->dt.pmu);
pmu               113 drivers/perf/arm_dsu_pmu.c 	struct pmu			pmu;
pmu               127 drivers/perf/arm_dsu_pmu.c static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
pmu               129 drivers/perf/arm_dsu_pmu.c 	return container_of(pmu, struct dsu_pmu, pmu);
pmu               155 drivers/perf/arm_dsu_pmu.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu               156 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
pmu               201 drivers/perf/arm_dsu_pmu.c 	struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
pmu               202 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
pmu               252 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
pmu               260 drivers/perf/arm_dsu_pmu.c 		dev_err(event->pmu->dev,
pmu               278 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
pmu               286 drivers/perf/arm_dsu_pmu.c 		dev_err(event->pmu->dev,
pmu               304 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
pmu               339 drivers/perf/arm_dsu_pmu.c 		dev_err(event->pmu->dev,
pmu               417 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
pmu               431 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
pmu               442 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
pmu               468 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
pmu               479 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_enable(struct pmu *pmu)
pmu               483 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
pmu               496 drivers/perf/arm_dsu_pmu.c static void dsu_pmu_disable(struct pmu *pmu)
pmu               500 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
pmu               509 drivers/perf/arm_dsu_pmu.c static bool dsu_pmu_validate_event(struct pmu *pmu,
pmu               516 drivers/perf/arm_dsu_pmu.c 	if (event->pmu != pmu)
pmu               534 drivers/perf/arm_dsu_pmu.c 	if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
pmu               537 drivers/perf/arm_dsu_pmu.c 		if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
pmu               540 drivers/perf/arm_dsu_pmu.c 	return dsu_pmu_validate_event(event->pmu, &fake_hw, event);
pmu               545 drivers/perf/arm_dsu_pmu.c 	struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
pmu               547 drivers/perf/arm_dsu_pmu.c 	if (event->attr.type != event->pmu->type)
pmu               552 drivers/perf/arm_dsu_pmu.c 		dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
pmu               558 drivers/perf/arm_dsu_pmu.c 		dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
pmu               563 drivers/perf/arm_dsu_pmu.c 		dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
pmu               568 drivers/perf/arm_dsu_pmu.c 		dev_dbg(dsu_pmu->pmu.dev,
pmu               716 drivers/perf/arm_dsu_pmu.c 	dsu_pmu->pmu = (struct pmu) {
pmu               732 drivers/perf/arm_dsu_pmu.c 	rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
pmu               746 drivers/perf/arm_dsu_pmu.c 	perf_pmu_unregister(&dsu_pmu->pmu);
pmu               801 drivers/perf/arm_dsu_pmu.c 	perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
pmu               105 drivers/perf/arm_pmu.c 	if (type == event->pmu->type)
pmu               122 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               164 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               194 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               210 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               235 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               251 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               284 drivers/perf/arm_pmu.c validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
pmu               297 drivers/perf/arm_pmu.c 	if (event->pmu != pmu)
pmu               306 drivers/perf/arm_pmu.c 	armpmu = to_arm_pmu(event->pmu);
pmu               322 drivers/perf/arm_pmu.c 	if (!validate_event(event->pmu, &fake_pmu, leader))
pmu               326 drivers/perf/arm_pmu.c 		if (!validate_event(event->pmu, &fake_pmu, sibling))
pmu               330 drivers/perf/arm_pmu.c 	if (!validate_event(event->pmu, &fake_pmu, event))
pmu               363 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               424 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               447 drivers/perf/arm_pmu.c static void armpmu_enable(struct pmu *pmu)
pmu               449 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
pmu               461 drivers/perf/arm_pmu.c static void armpmu_disable(struct pmu *pmu)
pmu               463 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
pmu               479 drivers/perf/arm_pmu.c 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
pmu               603 drivers/perf/arm_pmu.c static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
pmu               605 drivers/perf/arm_pmu.c 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
pmu               617 drivers/perf/arm_pmu.c 	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
pmu               620 drivers/perf/arm_pmu.c 	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
pmu               622 drivers/perf/arm_pmu.c 	if (pmu->reset)
pmu               623 drivers/perf/arm_pmu.c 		pmu->reset(pmu);
pmu               625 drivers/perf/arm_pmu.c 	per_cpu(cpu_armpmu, cpu) = pmu;
pmu               627 drivers/perf/arm_pmu.c 	irq = armpmu_get_cpu_irq(pmu, cpu);
pmu               640 drivers/perf/arm_pmu.c 	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
pmu               643 drivers/perf/arm_pmu.c 	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
pmu               646 drivers/perf/arm_pmu.c 	irq = armpmu_get_cpu_irq(pmu, cpu);
pmu               783 drivers/perf/arm_pmu.c 	struct arm_pmu *pmu;
pmu               786 drivers/perf/arm_pmu.c 	pmu = kzalloc(sizeof(*pmu), flags);
pmu               787 drivers/perf/arm_pmu.c 	if (!pmu) {
pmu               792 drivers/perf/arm_pmu.c 	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
pmu               793 drivers/perf/arm_pmu.c 	if (!pmu->hw_events) {
pmu               798 drivers/perf/arm_pmu.c 	pmu->pmu = (struct pmu) {
pmu               808 drivers/perf/arm_pmu.c 		.attr_groups	= pmu->attr_groups,
pmu               819 drivers/perf/arm_pmu.c 	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
pmu               825 drivers/perf/arm_pmu.c 		events = per_cpu_ptr(pmu->hw_events, cpu);
pmu               827 drivers/perf/arm_pmu.c 		events->percpu_pmu = pmu;
pmu               830 drivers/perf/arm_pmu.c 	return pmu;
pmu               833 drivers/perf/arm_pmu.c 	kfree(pmu);
pmu               849 drivers/perf/arm_pmu.c void armpmu_free(struct arm_pmu *pmu)
pmu               851 drivers/perf/arm_pmu.c 	free_percpu(pmu->hw_events);
pmu               852 drivers/perf/arm_pmu.c 	kfree(pmu);
pmu               855 drivers/perf/arm_pmu.c int armpmu_register(struct arm_pmu *pmu)
pmu               859 drivers/perf/arm_pmu.c 	ret = cpu_pmu_init(pmu);
pmu               863 drivers/perf/arm_pmu.c 	if (!pmu->set_event_filter)
pmu               864 drivers/perf/arm_pmu.c 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
pmu               866 drivers/perf/arm_pmu.c 	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
pmu               871 drivers/perf/arm_pmu.c 		__oprofile_cpu_pmu = pmu;
pmu               874 drivers/perf/arm_pmu.c 		pmu->name, pmu->num_events);
pmu               879 drivers/perf/arm_pmu.c 	cpu_pmu_destroy(pmu);
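validate_group() in arm_pmu.c, like its twins in arm-cci.c, arm_dsu_pmu.c and thunderx2_pmu.c above, dry-runs event placement: it builds a fake, empty pmu_hw_events, then tries to place the group leader, every sibling, and the new event in turn, rejecting the group if any placement fails. A condensed model of that walk, with an assumed counter count and a plain bitmask for the used set:

#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 6		/* assumed; the real count is per-PMU */

struct fake_hw { unsigned long used_mask; };

static bool place_event(struct fake_hw *hw)
{
	for (int i = 0; i < NUM_COUNTERS; i++) {
		if (!(hw->used_mask & (1UL << i))) {
			hw->used_mask |= 1UL << i;
			return true;
		}
	}
	return false;	/* group needs more counters than exist */
}

static bool validate_group(int nr_events)
{
	struct fake_hw fake = { 0 };	/* fresh PMU, as the drivers do */

	for (int i = 0; i < nr_events; i++)
		if (!place_event(&fake))
			return false;
	return true;
}

int main(void)
{
	printf("group of 4: %s\n", validate_group(4) ? "ok" : "too big");
	printf("group of 9: %s\n", validate_group(9) ? "ok" : "too big");
	return 0;
}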
pmu               191 drivers/perf/arm_pmu_acpi.c 	struct arm_pmu *pmu;
pmu               195 drivers/perf/arm_pmu_acpi.c 		pmu = per_cpu(probed_pmus, cpu);
pmu               196 drivers/perf/arm_pmu_acpi.c 		if (!pmu || pmu->acpi_cpuid != cpuid)
pmu               199 drivers/perf/arm_pmu_acpi.c 		return pmu;
pmu               202 drivers/perf/arm_pmu_acpi.c 	pmu = armpmu_alloc_atomic();
pmu               203 drivers/perf/arm_pmu_acpi.c 	if (!pmu) {
pmu               209 drivers/perf/arm_pmu_acpi.c 	pmu->acpi_cpuid = cpuid;
pmu               211 drivers/perf/arm_pmu_acpi.c 	return pmu;
pmu               218 drivers/perf/arm_pmu_acpi.c static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
pmu               220 drivers/perf/arm_pmu_acpi.c 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
pmu               226 drivers/perf/arm_pmu_acpi.c 	for_each_cpu(cpu, &pmu->supported_cpus) {
pmu               254 drivers/perf/arm_pmu_acpi.c 	struct arm_pmu *pmu;
pmu               264 drivers/perf/arm_pmu_acpi.c 	pmu = arm_pmu_acpi_find_alloc_pmu();
pmu               265 drivers/perf/arm_pmu_acpi.c 	if (!pmu)
pmu               268 drivers/perf/arm_pmu_acpi.c 	per_cpu(probed_pmus, cpu) = pmu;
pmu               270 drivers/perf/arm_pmu_acpi.c 	if (pmu_irq_matches(pmu, irq)) {
pmu               271 drivers/perf/arm_pmu_acpi.c 		hw_events = pmu->hw_events;
pmu               275 drivers/perf/arm_pmu_acpi.c 	cpumask_set_cpu(cpu, &pmu->supported_cpus);
pmu               306 drivers/perf/arm_pmu_acpi.c 		struct arm_pmu *pmu = per_cpu(probed_pmus, cpu);
pmu               309 drivers/perf/arm_pmu_acpi.c 		if (!pmu || pmu->name)
pmu               312 drivers/perf/arm_pmu_acpi.c 		ret = init_fn(pmu);
pmu               321 drivers/perf/arm_pmu_acpi.c 		base_name = pmu->name;
pmu               322 drivers/perf/arm_pmu_acpi.c 		pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++);
pmu               323 drivers/perf/arm_pmu_acpi.c 		if (!pmu->name) {
pmu               328 drivers/perf/arm_pmu_acpi.c 		ret = armpmu_register(pmu);
pmu               331 drivers/perf/arm_pmu_acpi.c 			kfree(pmu->name);
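The ACPI probe above registers one arm_pmu per distinct CPU implementation and disambiguates heterogeneous systems by appending an index to the base name with kasprintf("%s_%d", ...). A user-space equivalent of that naming step; the base name used here is only an example.

#include <stdio.h>
#include <stdlib.h>

static char *pmu_name(const char *base, int idx)
{
	int n = snprintf(NULL, 0, "%s_%d", base, idx);
	char *s = malloc(n + 1);

	if (s)
		snprintf(s, n + 1, "%s_%d", base, idx);
	return s;	/* NULL on allocation failure, as in the driver */
}

int main(void)
{
	for (int i = 0; i < 2; i++) {
		char *name = pmu_name("armv8_pmuv3", i);

		if (name)
			printf("%s\n", name);
		free(name);
	}
	return 0;
}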
pmu                25 drivers/perf/arm_pmu_platform.c static int probe_current_pmu(struct arm_pmu *pmu,
pmu                37 drivers/perf/arm_pmu_platform.c 		ret = info->init(pmu);
pmu                45 drivers/perf/arm_pmu_platform.c static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
pmu                48 drivers/perf/arm_pmu_platform.c 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
pmu                50 drivers/perf/arm_pmu_platform.c 	ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
pmu                54 drivers/perf/arm_pmu_platform.c 	for_each_cpu(cpu, &pmu->supported_cpus)
pmu                96 drivers/perf/arm_pmu_platform.c static int pmu_parse_irqs(struct arm_pmu *pmu)
pmu                99 drivers/perf/arm_pmu_platform.c 	struct platform_device *pdev = pmu->plat_device;
pmu               100 drivers/perf/arm_pmu_platform.c 	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
pmu               114 drivers/perf/arm_pmu_platform.c 		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
pmu               115 drivers/perf/arm_pmu_platform.c 		cpumask_setall(&pmu->supported_cpus);
pmu               122 drivers/perf/arm_pmu_platform.c 			return pmu_parse_percpu_irq(pmu, irq);
pmu               154 drivers/perf/arm_pmu_platform.c 		cpumask_set_cpu(cpu, &pmu->supported_cpus);
pmu               197 drivers/perf/arm_pmu_platform.c 	struct arm_pmu *pmu;
pmu               200 drivers/perf/arm_pmu_platform.c 	pmu = armpmu_alloc();
pmu               201 drivers/perf/arm_pmu_platform.c 	if (!pmu)
pmu               204 drivers/perf/arm_pmu_platform.c 	pmu->plat_device = pdev;
pmu               206 drivers/perf/arm_pmu_platform.c 	ret = pmu_parse_irqs(pmu);
pmu               213 drivers/perf/arm_pmu_platform.c 		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
pmu               217 drivers/perf/arm_pmu_platform.c 		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
pmu               219 drivers/perf/arm_pmu_platform.c 			pmu->secure_access = false;
pmu               222 drivers/perf/arm_pmu_platform.c 		ret = init_fn(pmu);
pmu               224 drivers/perf/arm_pmu_platform.c 		cpumask_setall(&pmu->supported_cpus);
pmu               225 drivers/perf/arm_pmu_platform.c 		ret = probe_current_pmu(pmu, probe_table);
pmu               233 drivers/perf/arm_pmu_platform.c 	ret = armpmu_request_irqs(pmu);
pmu               237 drivers/perf/arm_pmu_platform.c 	ret = armpmu_register(pmu);
pmu               244 drivers/perf/arm_pmu_platform.c 	armpmu_free_irqs(pmu);
pmu               247 drivers/perf/arm_pmu_platform.c 	armpmu_free(pmu);
pmu               108 drivers/perf/arm_smmuv3_pmu.c 	struct pmu pmu;
pmu               118 drivers/perf/arm_smmuv3_pmu.c #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
pmu               132 drivers/perf/arm_smmuv3_pmu.c static inline void smmu_pmu_enable(struct pmu *pmu)
pmu               134 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
pmu               141 drivers/perf/arm_smmuv3_pmu.c static inline void smmu_pmu_disable(struct pmu *pmu)
pmu               143 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);
pmu               205 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
pmu               253 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
pmu               326 drivers/perf/arm_smmuv3_pmu.c 	if (new->pmu != curr->pmu)
pmu               329 drivers/perf/arm_smmuv3_pmu.c 	if (to_smmu_pmu(new->pmu)->global_filter &&
pmu               344 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
pmu               350 drivers/perf/arm_smmuv3_pmu.c 	if (event->attr.type != event->pmu->type)
pmu               404 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
pmu               417 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
pmu               434 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
pmu               459 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
pmu               598 drivers/perf/arm_smmuv3_pmu.c 	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
pmu               644 drivers/perf/arm_smmuv3_pmu.c 	struct smmu_pmu *pmu = dev_get_drvdata(dev);
pmu               649 drivers/perf/arm_smmuv3_pmu.c 	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
pmu               650 drivers/perf/arm_smmuv3_pmu.c 	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
pmu               652 drivers/perf/arm_smmuv3_pmu.c 		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
pmu               655 drivers/perf/arm_smmuv3_pmu.c static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
pmu               658 drivers/perf/arm_smmuv3_pmu.c 	struct device *dev = pmu->dev;
pmu               662 drivers/perf/arm_smmuv3_pmu.c 	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
pmu               665 drivers/perf/arm_smmuv3_pmu.c 	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
pmu               676 drivers/perf/arm_smmuv3_pmu.c 		pmu->irq = desc->irq;
pmu               682 drivers/perf/arm_smmuv3_pmu.c static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
pmu               687 drivers/perf/arm_smmuv3_pmu.c 	smmu_pmu_setup_msi(pmu);
pmu               689 drivers/perf/arm_smmuv3_pmu.c 	irq = pmu->irq;
pmu               691 drivers/perf/arm_smmuv3_pmu.c 		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
pmu               692 drivers/perf/arm_smmuv3_pmu.c 				       flags, "smmuv3-pmu", pmu);
pmu               700 drivers/perf/arm_smmuv3_pmu.c 	smmu_pmu_disable(&smmu_pmu->pmu);
pmu               744 drivers/perf/arm_smmuv3_pmu.c 	smmu_pmu->pmu = (struct pmu) {
pmu               821 drivers/perf/arm_smmuv3_pmu.c 	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
pmu               844 drivers/perf/arm_smmuv3_pmu.c 	perf_pmu_unregister(&smmu_pmu->pmu);
pmu               854 drivers/perf/arm_smmuv3_pmu.c 	smmu_pmu_disable(&smmu_pmu->pmu);
pmu                51 drivers/perf/arm_spe_pmu.c 	struct pmu				pmu;
pmu                75 drivers/perf/arm_spe_pmu.c #define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
pmu               285 drivers/perf/arm_spe_pmu.c 	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
pmu               360 drivers/perf/arm_spe_pmu.c 	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
pmu               388 drivers/perf/arm_spe_pmu.c 	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
pmu               459 drivers/perf/arm_spe_pmu.c 	struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu);
pmu               663 drivers/perf/arm_spe_pmu.c 	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
pmu               666 drivers/perf/arm_spe_pmu.c 	if (attr->type != event->pmu->type)
pmu               715 drivers/perf/arm_spe_pmu.c 	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
pmu               748 drivers/perf/arm_spe_pmu.c 	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
pmu               790 drivers/perf/arm_spe_pmu.c 	struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
pmu               885 drivers/perf/arm_spe_pmu.c 	spe_pmu->pmu = (struct pmu) {
pmu               917 drivers/perf/arm_spe_pmu.c 	return perf_pmu_register(&spe_pmu->pmu, name, -1);
pmu               922 drivers/perf/arm_spe_pmu.c 	perf_pmu_unregister(&spe_pmu->pmu);
pmu                40 drivers/perf/fsl_imx8_ddr_perf.c #define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
pmu                69 drivers/perf/fsl_imx8_ddr_perf.c 	struct pmu pmu;
pmu                85 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = dev_get_drvdata(dev);
pmu                87 drivers/perf/fsl_imx8_ddr_perf.c 	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
pmu               205 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
pmu               207 drivers/perf/fsl_imx8_ddr_perf.c 	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
pmu               212 drivers/perf/fsl_imx8_ddr_perf.c static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
pmu               222 drivers/perf/fsl_imx8_ddr_perf.c 		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
pmu               229 drivers/perf/fsl_imx8_ddr_perf.c 		if (pmu->events[i] == NULL)
pmu               236 drivers/perf/fsl_imx8_ddr_perf.c static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
pmu               238 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->events[counter] = NULL;
pmu               241 drivers/perf/fsl_imx8_ddr_perf.c static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
pmu               243 drivers/perf/fsl_imx8_ddr_perf.c 	struct perf_event *event = pmu->events[counter];
pmu               244 drivers/perf/fsl_imx8_ddr_perf.c 	void __iomem *base = pmu->base;
pmu               258 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
pmu               262 drivers/perf/fsl_imx8_ddr_perf.c 	if (event->attr.type != event->pmu->type)
pmu               269 drivers/perf/fsl_imx8_ddr_perf.c 		dev_warn(pmu->dev, "Can't provide per-task data!\n");
pmu               278 drivers/perf/fsl_imx8_ddr_perf.c 	if (event->group_leader->pmu != event->pmu &&
pmu               282 drivers/perf/fsl_imx8_ddr_perf.c 	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
pmu               292 drivers/perf/fsl_imx8_ddr_perf.c 		if (sibling->pmu != event->pmu &&
pmu               297 drivers/perf/fsl_imx8_ddr_perf.c 	event->cpu = pmu->cpu;
pmu               306 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
pmu               313 drivers/perf/fsl_imx8_ddr_perf.c 		new_raw_count = ddr_perf_read_counter(pmu, counter);
pmu               322 drivers/perf/fsl_imx8_ddr_perf.c static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
pmu               335 drivers/perf/fsl_imx8_ddr_perf.c 		writel(0, pmu->base + reg);
pmu               338 drivers/perf/fsl_imx8_ddr_perf.c 		writel(val, pmu->base + reg);
pmu               341 drivers/perf/fsl_imx8_ddr_perf.c 		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
pmu               342 drivers/perf/fsl_imx8_ddr_perf.c 		writel(val, pmu->base + reg);
pmu               348 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
pmu               354 drivers/perf/fsl_imx8_ddr_perf.c 	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);
pmu               361 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
pmu               367 drivers/perf/fsl_imx8_ddr_perf.c 	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
pmu               371 drivers/perf/fsl_imx8_ddr_perf.c 			if (pmu->events[i] &&
pmu               372 drivers/perf/fsl_imx8_ddr_perf.c 			    !ddr_perf_filters_compatible(event, pmu->events[i]))
pmu               379 drivers/perf/fsl_imx8_ddr_perf.c 			writel(cfg1, pmu->base + COUNTER_DPCR1);
pmu               383 drivers/perf/fsl_imx8_ddr_perf.c 	counter = ddr_perf_alloc_counter(pmu, cfg);
pmu               385 drivers/perf/fsl_imx8_ddr_perf.c 		dev_dbg(pmu->dev, "There are not enough counters\n");
pmu               389 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->events[counter] = event;
pmu               390 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->active_events++;
pmu               403 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
pmu               407 drivers/perf/fsl_imx8_ddr_perf.c 	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
pmu               415 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
pmu               421 drivers/perf/fsl_imx8_ddr_perf.c 	ddr_perf_free_counter(pmu, counter);
pmu               422 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->active_events--;
pmu               426 drivers/perf/fsl_imx8_ddr_perf.c static void ddr_perf_pmu_enable(struct pmu *pmu)
pmu               428 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
pmu               438 drivers/perf/fsl_imx8_ddr_perf.c static void ddr_perf_pmu_disable(struct pmu *pmu)
pmu               440 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
pmu               449 drivers/perf/fsl_imx8_ddr_perf.c static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
pmu               452 drivers/perf/fsl_imx8_ddr_perf.c 	*pmu = (struct ddr_pmu) {
pmu               453 drivers/perf/fsl_imx8_ddr_perf.c 		.pmu = (struct pmu) {
pmu               470 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
pmu               471 drivers/perf/fsl_imx8_ddr_perf.c 	return pmu->id;
pmu               477 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
pmu               481 drivers/perf/fsl_imx8_ddr_perf.c 	ddr_perf_counter_enable(pmu,
pmu               497 drivers/perf/fsl_imx8_ddr_perf.c 		if (!pmu->events[i])
pmu               500 drivers/perf/fsl_imx8_ddr_perf.c 		event = pmu->events[i];
pmu               508 drivers/perf/fsl_imx8_ddr_perf.c 	ddr_perf_counter_enable(pmu,
pmu               520 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
pmu               523 drivers/perf/fsl_imx8_ddr_perf.c 	if (cpu != pmu->cpu)
pmu               530 drivers/perf/fsl_imx8_ddr_perf.c 	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
pmu               531 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->cpu = target;
pmu               533 drivers/perf/fsl_imx8_ddr_perf.c 	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));
pmu               540 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu;
pmu               554 drivers/perf/fsl_imx8_ddr_perf.c 	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
pmu               555 drivers/perf/fsl_imx8_ddr_perf.c 	if (!pmu)
pmu               558 drivers/perf/fsl_imx8_ddr_perf.c 	num = ddr_perf_init(pmu, base, &pdev->dev);
pmu               560 drivers/perf/fsl_imx8_ddr_perf.c 	platform_set_drvdata(pdev, pmu);
pmu               567 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->devtype_data = of_device_get_match_data(&pdev->dev);
pmu               569 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->cpu = raw_smp_processor_id();
pmu               580 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->cpuhp_state = ret;
pmu               583 drivers/perf/fsl_imx8_ddr_perf.c 	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
pmu               601 drivers/perf/fsl_imx8_ddr_perf.c 					pmu);
pmu               607 drivers/perf/fsl_imx8_ddr_perf.c 	pmu->irq = irq;
pmu               608 drivers/perf/fsl_imx8_ddr_perf.c 	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
pmu               610 drivers/perf/fsl_imx8_ddr_perf.c 		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
pmu               614 drivers/perf/fsl_imx8_ddr_perf.c 	ret = perf_pmu_register(&pmu->pmu, name, -1);
pmu               621 drivers/perf/fsl_imx8_ddr_perf.c 	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
pmu               623 drivers/perf/fsl_imx8_ddr_perf.c 	cpuhp_remove_multi_state(pmu->cpuhp_state);
pmu               625 drivers/perf/fsl_imx8_ddr_perf.c 	ida_simple_remove(&ddr_ida, pmu->id);
pmu               632 drivers/perf/fsl_imx8_ddr_perf.c 	struct ddr_pmu *pmu = platform_get_drvdata(pdev);
pmu               634 drivers/perf/fsl_imx8_ddr_perf.c 	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
pmu               635 drivers/perf/fsl_imx8_ddr_perf.c 	cpuhp_remove_multi_state(pmu->cpuhp_state);
pmu               636 drivers/perf/fsl_imx8_ddr_perf.c 	irq_set_affinity_hint(pmu->irq, NULL);
pmu               638 drivers/perf/fsl_imx8_ddr_perf.c 	perf_pmu_unregister(&pmu->pmu);
pmu               640 drivers/perf/fsl_imx8_ddr_perf.c 	ida_simple_remove(&ddr_ida, pmu->id);
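ddr_perf_alloc_counter() in fsl_imx8_ddr_perf.c treats counter 0 as dedicated: a cycles event may only land in EVENT_CYCLES_COUNTER, and every other event takes the first free slot after it. A model of that allocator; the counter count and event IDs below are illustrative, not the i.MX8 register map.

#include <stdio.h>

#define NUM_COUNTERS		4	/* assumed */
#define EVENT_CYCLES_ID		0	/* assumed */
#define EVENT_CYCLES_COUNTER	0

static void *events[NUM_COUNTERS];

static int alloc_counter(int event, void *owner)
{
	if (event == EVENT_CYCLES_ID) {
		if (!events[EVENT_CYCLES_COUNTER]) {
			events[EVENT_CYCLES_COUNTER] = owner;
			return EVENT_CYCLES_COUNTER;
		}
		return -1;	/* cycles slot already taken */
	}
	for (int i = 1; i < NUM_COUNTERS; i++) {	/* skip cycles slot */
		if (!events[i]) {
			events[i] = owner;
			return i;
		}
	}
	return -1;	/* "There are not enough counters" */
}

int main(void)
{
	int dummy;

	printf("cycles   -> counter %d\n", alloc_counter(EVENT_CYCLES_ID, &dummy));
	printf("evt 0x10 -> counter %d\n", alloc_counter(0x10, &dummy));
	printf("evt 0x11 -> counter %d\n", alloc_counter(0x11, &dummy));
	return 0;
}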
pmu               145 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c 	struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
pmu               382 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c 	ddrc_pmu->pmu = (struct pmu) {
pmu               397 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c 	ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
pmu               411 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c 	perf_pmu_unregister(&ddrc_pmu->pmu);
pmu               393 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c 	hha_pmu->pmu = (struct pmu) {
pmu               408 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c 	ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
pmu               422 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c 	perf_pmu_unregister(&hha_pmu->pmu);
pmu               383 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c 	l3c_pmu->pmu = (struct pmu) {
pmu               398 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c 	ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
pmu               412 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c 	perf_pmu_unregister(&l3c_pmu->pmu);
pmu                65 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu                74 drivers/perf/hisilicon/hisi_uncore_pmu.c 		if (leader->pmu != event->pmu)
pmu                85 drivers/perf/hisilicon/hisi_uncore_pmu.c 		if (sibling->pmu != event->pmu)
pmu               102 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu               131 drivers/perf/hisilicon/hisi_uncore_pmu.c 	if (event->attr.type != event->pmu->type)
pmu               156 drivers/perf/hisilicon/hisi_uncore_pmu.c 	hisi_pmu = to_hisi_pmu(event->pmu);
pmu               182 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu               197 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu               206 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu               225 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu               245 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu               283 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu               305 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu);
pmu               320 drivers/perf/hisilicon/hisi_uncore_pmu.c void hisi_uncore_pmu_enable(struct pmu *pmu)
pmu               322 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
pmu               332 drivers/perf/hisilicon/hisi_uncore_pmu.c void hisi_uncore_pmu_disable(struct pmu *pmu)
pmu               334 drivers/perf/hisilicon/hisi_uncore_pmu.c 	struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu);
pmu               430 drivers/perf/hisilicon/hisi_uncore_pmu.c 	perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
pmu                24 drivers/perf/hisilicon/hisi_uncore_pmu.h #define to_hisi_pmu(p)	(container_of(p, struct hisi_pmu, pmu))
pmu                58 drivers/perf/hisilicon/hisi_uncore_pmu.h 	struct pmu pmu;
pmu                89 drivers/perf/hisilicon/hisi_uncore_pmu.h void hisi_uncore_pmu_enable(struct pmu *pmu);
pmu                90 drivers/perf/hisilicon/hisi_uncore_pmu.h void hisi_uncore_pmu_disable(struct pmu *pmu);
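The shared hisi_uncore_pmu.c event_init, like the dsu and ddr variants above, rejects the modes an uncore counter cannot honour: sampling, per-task (cpu < 0) attachment, and privilege filtering. A condensed gate in the same shape; the field names are stand-ins for the real perf_event_attr bits, and -1 stands in for the driver's -EOPNOTSUPP/-EINVAL.

#include <stdbool.h>
#include <stdio.h>

struct fake_attr {
	bool sample_period;		/* sampling requested */
	bool exclude_user, exclude_kernel;
};

static int uncore_event_init(const struct fake_attr *a, int cpu)
{
	if (a->sample_period)
		return -1;	/* uncore counters can't sample */
	if (cpu < 0)
		return -1;	/* no per-task counting */
	if (a->exclude_user || a->exclude_kernel)
		return -1;	/* no privilege filtering */
	return 0;
}

int main(void)
{
	struct fake_attr ok = { 0 }, bad = { .sample_period = true };

	printf("plain counting: %d\n", uncore_event_init(&ok, 0));
	printf("sampling:       %d\n", uncore_event_init(&bad, 0));
	return 0;
}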
pmu               154 drivers/perf/qcom_l2_pmu.c 	struct pmu pmu;
pmu               192 drivers/perf/qcom_l2_pmu.c #define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
pmu               462 drivers/perf/qcom_l2_pmu.c static void l2_cache_pmu_enable(struct pmu *pmu)
pmu               475 drivers/perf/qcom_l2_pmu.c static void l2_cache_pmu_disable(struct pmu *pmu)
pmu               487 drivers/perf/qcom_l2_pmu.c 	if (event->attr.type != event->pmu->type)
pmu               490 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu = to_l2cache_pmu(event->pmu);
pmu               514 drivers/perf/qcom_l2_pmu.c 	if (event->group_leader->pmu != event->pmu &&
pmu               522 drivers/perf/qcom_l2_pmu.c 		if (sibling->pmu != event->pmu &&
pmu               593 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
pmu               637 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
pmu               663 drivers/perf/qcom_l2_pmu.c 	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
pmu               876 drivers/perf/qcom_l2_pmu.c 	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
pmu               952 drivers/perf/qcom_l2_pmu.c 	l2cache_pmu->pmu = (struct pmu) {
pmu               999 drivers/perf/qcom_l2_pmu.c 	err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
pmu              1021 drivers/perf/qcom_l2_pmu.c 	perf_pmu_unregister(&l2cache_pmu->pmu);
pmu               155 drivers/perf/qcom_l3_pmu.c 	struct pmu		pmu;
pmu               163 drivers/perf/qcom_l3_pmu.c #define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu))
pmu               195 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               227 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               241 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               276 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               302 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               318 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               427 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__pmu_enable(struct pmu *pmu)
pmu               429 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);
pmu               437 drivers/perf/qcom_l3_pmu.c static void qcom_l3_cache__pmu_disable(struct pmu *pmu)
pmu               439 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu);
pmu               457 drivers/perf/qcom_l3_pmu.c 	if (leader->pmu != event->pmu && !is_software_event(leader))
pmu               466 drivers/perf/qcom_l3_pmu.c 		if (sibling->pmu != event->pmu)
pmu               480 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               486 drivers/perf/qcom_l3_pmu.c 	if (event->attr.type != event->pmu->type)
pmu               549 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               577 drivers/perf/qcom_l3_pmu.c 	struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu);
pmu               727 drivers/perf/qcom_l3_pmu.c 	perf_pmu_migrate_context(&l3pmu->pmu, cpu, target);
pmu               752 drivers/perf/qcom_l3_pmu.c 	l3pmu->pmu = (struct pmu) {
pmu               796 drivers/perf/qcom_l3_pmu.c 	ret = perf_pmu_register(&l3pmu->pmu, name, -1);
pmu               802 drivers/perf/qcom_l3_pmu.c 	dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type);
pmu                67 drivers/perf/thunderx2_pmu.c 	struct pmu pmu;
pmu                90 drivers/perf/thunderx2_pmu.c static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
pmu                92 drivers/perf/thunderx2_pmu.c 	return container_of(pmu, struct tx2_uncore_pmu, pmu);
pmu               323 drivers/perf/thunderx2_pmu.c 	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
pmu               366 drivers/perf/thunderx2_pmu.c static bool tx2_uncore_validate_event(struct pmu *pmu,
pmu               372 drivers/perf/thunderx2_pmu.c 	if (event->pmu != pmu)
pmu               391 drivers/perf/thunderx2_pmu.c 	if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
pmu               395 drivers/perf/thunderx2_pmu.c 		if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
pmu               399 drivers/perf/thunderx2_pmu.c 	if (!tx2_uncore_validate_event(event->pmu, event, &counters))
pmu               416 drivers/perf/thunderx2_pmu.c 	if (event->attr.type != event->pmu->type)
pmu               430 drivers/perf/thunderx2_pmu.c 	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
pmu               454 drivers/perf/thunderx2_pmu.c 	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
pmu               476 drivers/perf/thunderx2_pmu.c 	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
pmu               491 drivers/perf/thunderx2_pmu.c 	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
pmu               511 drivers/perf/thunderx2_pmu.c 	struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
pmu               556 drivers/perf/thunderx2_pmu.c 	tx2_pmu->pmu = (struct pmu) {
pmu               569 drivers/perf/thunderx2_pmu.c 	tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
pmu               572 drivers/perf/thunderx2_pmu.c 	return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
pmu               606 drivers/perf/thunderx2_pmu.c 			tx2_pmu->pmu.name);
pmu               757 drivers/perf/thunderx2_pmu.c 	perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
pmu               807 drivers/perf/thunderx2_pmu.c 				perf_pmu_unregister(&tx2_pmu->pmu);
pmu                80 drivers/perf/xgene_pmu.c #define to_pmu_dev(p)     container_of(p, struct xgene_pmu_dev, pmu)
pmu                95 drivers/perf/xgene_pmu.c 	struct pmu pmu;
pmu               104 drivers/perf/xgene_pmu.c 	void (*mask_int)(struct xgene_pmu *pmu);
pmu               105 drivers/perf/xgene_pmu.c 	void (*unmask_int)(struct xgene_pmu *pmu);
pmu               106 drivers/perf/xgene_pmu.c 	u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx);
pmu               107 drivers/perf/xgene_pmu.c 	void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val);
pmu               869 drivers/perf/xgene_pmu.c static void xgene_perf_pmu_enable(struct pmu *pmu)
pmu               871 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
pmu               882 drivers/perf/xgene_pmu.c static void xgene_perf_pmu_disable(struct pmu *pmu)
pmu               884 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
pmu               892 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
pmu               897 drivers/perf/xgene_pmu.c 	if (event->attr.type != event->pmu->type)
pmu               934 drivers/perf/xgene_pmu.c 	if (event->group_leader->pmu != event->pmu &&
pmu               939 drivers/perf/xgene_pmu.c 		if (sibling->pmu != event->pmu &&
pmu               949 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
pmu               965 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
pmu               974 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
pmu               992 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
pmu              1017 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
pmu              1060 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
pmu              1081 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
pmu              1109 drivers/perf/xgene_pmu.c 	pmu_dev->pmu = (struct pmu) {
pmu              1127 drivers/perf/xgene_pmu.c 	return perf_pmu_register(&pmu_dev->pmu, name, -1);
pmu              1134 drivers/perf/xgene_pmu.c 	struct xgene_pmu_dev *pmu;
pmu              1136 drivers/perf/xgene_pmu.c 	pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
pmu              1137 drivers/perf/xgene_pmu.c 	if (!pmu)
pmu              1139 drivers/perf/xgene_pmu.c 	pmu->parent = xgene_pmu;
pmu              1140 drivers/perf/xgene_pmu.c 	pmu->inf = &ctx->inf;
pmu              1141 drivers/perf/xgene_pmu.c 	ctx->pmu_dev = pmu;
pmu              1143 drivers/perf/xgene_pmu.c 	switch (pmu->inf->type) {
pmu              1145 drivers/perf/xgene_pmu.c 		if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask))
pmu              1148 drivers/perf/xgene_pmu.c 			pmu->attr_groups = l3c_pmu_v3_attr_groups;
pmu              1150 drivers/perf/xgene_pmu.c 			pmu->attr_groups = l3c_pmu_attr_groups;
pmu              1154 drivers/perf/xgene_pmu.c 			pmu->attr_groups = iob_fast_pmu_v3_attr_groups;
pmu              1156 drivers/perf/xgene_pmu.c 			pmu->attr_groups = iob_pmu_attr_groups;
pmu              1160 drivers/perf/xgene_pmu.c 			pmu->attr_groups = iob_slow_pmu_v3_attr_groups;
pmu              1163 drivers/perf/xgene_pmu.c 		if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
pmu              1166 drivers/perf/xgene_pmu.c 			pmu->attr_groups = mcb_pmu_v3_attr_groups;
pmu              1168 drivers/perf/xgene_pmu.c 			pmu->attr_groups = mcb_pmu_attr_groups;
pmu              1171 drivers/perf/xgene_pmu.c 		if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
pmu              1174 drivers/perf/xgene_pmu.c 			pmu->attr_groups = mc_pmu_v3_attr_groups;
pmu              1176 drivers/perf/xgene_pmu.c 			pmu->attr_groups = mc_pmu_attr_groups;
pmu              1182 drivers/perf/xgene_pmu.c 	if (xgene_init_perf(pmu, ctx->name)) {
pmu              1822 drivers/perf/xgene_pmu.c 		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
pmu              1825 drivers/perf/xgene_pmu.c 		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
pmu              1828 drivers/perf/xgene_pmu.c 		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
pmu              1831 drivers/perf/xgene_pmu.c 		perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
pmu              1959 drivers/perf/xgene_pmu.c 		perf_pmu_unregister(&ctx->pmu_dev->pmu);
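
The drivers/perf/xgene_pmu.c hits above span the whole life cycle of a dynamically typed system PMU: filling in a struct pmu (xgene_pmu.c:1109), registering it with type -1 so the core allocates an id (xgene_pmu.c:1127), migrating events off a dying CPU, and perf_pmu_unregister() on teardown (xgene_pmu.c:1959). A minimal sketch of that registration shape follows; the my_* names are hypothetical, and only the struct pmu members and the register call are the real <linux/perf_event.h> API.

#include <linux/perf_event.h>

static void my_pmu_enable(struct pmu *pmu) { /* start all counters */ }
static void my_pmu_disable(struct pmu *pmu) { /* stop all counters */ }

static int my_event_init(struct perf_event *event)
{
	/* Reject events aimed at another PMU (cf. xgene_pmu.c:897). */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	return 0;
}

static int my_event_add(struct perf_event *event, int flags) { return 0; }
static void my_event_del(struct perf_event *event, int flags) { }
static void my_event_start(struct perf_event *event, int flags) { }
static void my_event_stop(struct perf_event *event, int flags) { }
static void my_event_read(struct perf_event *event) { }

static struct pmu my_pmu = {
	.task_ctx_nr = perf_invalid_context,	/* uncore: no per-task context */
	.pmu_enable  = my_pmu_enable,
	.pmu_disable = my_pmu_disable,
	.event_init  = my_event_init,
	.add         = my_event_add,
	.del         = my_event_del,
	.start       = my_event_start,
	.stop        = my_event_stop,
	.read        = my_event_read,
};

/* type -1 asks the core for a dynamically allocated type id: */
/*	err = perf_pmu_register(&my_pmu, "my_uncore", -1);    */
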
pmu               131 drivers/phy/allwinner/phy-sun4i-usb.c 		void __iomem *pmu;
pmu               245 drivers/phy/allwinner/phy-sun4i-usb.c 	if (!phy->pmu)
pmu               256 drivers/phy/allwinner/phy-sun4i-usb.c 	reg_value = readl(phy->pmu);
pmu               263 drivers/phy/allwinner/phy-sun4i-usb.c 	writel(reg_value, phy->pmu);
pmu               299 drivers/phy/allwinner/phy-sun4i-usb.c 		if (phy->pmu && data->cfg->enable_pmu_unk1) {
pmu               300 drivers/phy/allwinner/phy-sun4i-usb.c 			val = readl(phy->pmu + REG_PMU_UNK1);
pmu               301 drivers/phy/allwinner/phy-sun4i-usb.c 			writel(val & ~2, phy->pmu + REG_PMU_UNK1);
pmu               799 drivers/phy/allwinner/phy-sun4i-usb.c 			phy->pmu = devm_ioremap_resource(dev, res);
pmu               800 drivers/phy/allwinner/phy-sun4i-usb.c 			if (IS_ERR(phy->pmu))
pmu               801 drivers/phy/allwinner/phy-sun4i-usb.c 				return PTR_ERR(phy->pmu);
pmu                37 drivers/phy/allwinner/phy-sun9i-usb.c 	void __iomem *pmu;
pmu                56 drivers/phy/allwinner/phy-sun9i-usb.c 	reg_value = readl(phy->pmu);
pmu                63 drivers/phy/allwinner/phy-sun9i-usb.c 	writel(reg_value, phy->pmu);
pmu               160 drivers/phy/allwinner/phy-sun9i-usb.c 	phy->pmu = devm_ioremap_resource(dev, res);
pmu               161 drivers/phy/allwinner/phy-sun9i-usb.c 	if (IS_ERR(phy->pmu))
pmu               162 drivers/phy/allwinner/phy-sun9i-usb.c 		return PTR_ERR(phy->pmu);
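
Both Allwinner PHY drivers above use pmu as an ioremapped register window rather than a perf object: devm_ioremap_resource() in probe, then readl()/writel() read-modify-write cycles. A sketch of that idiom under a hypothetical MY_PMU_EN bit and my_* names:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#define MY_PMU_EN	BIT(0)		/* hypothetical enable bit */

static void __iomem *my_pmu_base;

static int my_phy_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	my_pmu_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(my_pmu_base))	/* same check as phy-sun4i-usb.c:800 */
		return PTR_ERR(my_pmu_base);
	return 0;
}

static void my_phy_set_enable(bool enable)
{
	u32 val = readl(my_pmu_base);	/* read ... */

	if (enable)
		val |= MY_PMU_EN;	/* ... modify ... */
	else
		val &= ~MY_PMU_EN;
	writel(val, my_pmu_base);	/* ... write back */
}
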
pmu                68 drivers/pinctrl/mvebu/pinctrl-dove.c 	unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
pmu                71 drivers/pinctrl/mvebu/pinctrl-dove.c 	if ((pmu & BIT(pid)) == 0)
pmu                86 drivers/pinctrl/mvebu/pinctrl-dove.c 	unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
pmu                90 drivers/pinctrl/mvebu/pinctrl-dove.c 		writel(pmu & ~BIT(pid), data->base + PMU_MPP_GENERAL_CTRL);
pmu                94 drivers/pinctrl/mvebu/pinctrl-dove.c 	writel(pmu | BIT(pid), data->base + PMU_MPP_GENERAL_CTRL);
pmu               192 drivers/pinctrl/mvebu/pinctrl-dove.c 	unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
pmu               194 drivers/pinctrl/mvebu/pinctrl-dove.c 	*config = ((pmu & AU0_AC97_SEL) != 0);
pmu               202 drivers/pinctrl/mvebu/pinctrl-dove.c 	unsigned long pmu = readl(data->base + PMU_MPP_GENERAL_CTRL);
pmu               204 drivers/pinctrl/mvebu/pinctrl-dove.c 	pmu &= ~AU0_AC97_SEL;
pmu               206 drivers/pinctrl/mvebu/pinctrl-dove.c 		pmu |= AU0_AC97_SEL;
pmu               207 drivers/pinctrl/mvebu/pinctrl-dove.c 	writel(pmu, data->base + PMU_MPP_GENERAL_CTRL);
pmu               512 drivers/pinctrl/qcom/pinctrl-ipq4019.c 	FUNCTION(pmu),
pmu               630 drivers/pinctrl/qcom/pinctrl-ipq4019.c 	PINGROUP(54, qpic, blsp_spi0, i2s_td, NA, pmu, NA, NA, NA, tm, NA, NA,
pmu               632 drivers/pinctrl/qcom/pinctrl-ipq4019.c 	PINGROUP(55, qpic, blsp_spi0, i2s_td, NA, pmu, NA, NA, NA, tm, NA, NA,
pmu               278 drivers/regulator/bcm590xx-regulator.c 	struct bcm590xx_reg *pmu;
pmu               284 drivers/regulator/bcm590xx-regulator.c 	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
pmu               285 drivers/regulator/bcm590xx-regulator.c 	if (!pmu)
pmu               288 drivers/regulator/bcm590xx-regulator.c 	pmu->mfd = bcm590xx;
pmu               290 drivers/regulator/bcm590xx-regulator.c 	platform_set_drvdata(pdev, pmu);
pmu               292 drivers/regulator/bcm590xx-regulator.c 	pmu->desc = devm_kcalloc(&pdev->dev,
pmu               296 drivers/regulator/bcm590xx-regulator.c 	if (!pmu->desc)
pmu               303 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].name = info->name;
pmu               304 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].of_match = of_match_ptr(info->name);
pmu               305 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].regulators_node = of_match_ptr("regulators");
pmu               306 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].supply_name = info->vin_name;
pmu               307 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].id = i;
pmu               308 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].volt_table = info->volt_table;
pmu               309 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].n_voltages = info->n_voltages;
pmu               310 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].linear_ranges = info->linear_ranges;
pmu               311 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].n_linear_ranges = info->n_linear_ranges;
pmu               314 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].ops = &bcm590xx_ops_ldo;
pmu               315 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].vsel_mask = BCM590XX_LDO_VSEL_MASK;
pmu               317 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].ops = &bcm590xx_ops_vbus;
pmu               319 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].ops = &bcm590xx_ops_dcdc;
pmu               320 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].vsel_mask = BCM590XX_SR_VSEL_MASK;
pmu               324 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].enable_mask = BCM590XX_VBUS_ENABLE;
pmu               326 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].vsel_reg = bcm590xx_get_vsel_register(i);
pmu               327 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].enable_is_inverted = true;
pmu               328 drivers/regulator/bcm590xx-regulator.c 			pmu->desc[i].enable_mask = BCM590XX_REG_ENABLE;
pmu               330 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].enable_reg = bcm590xx_get_enable_register(i);
pmu               331 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].type = REGULATOR_VOLTAGE;
pmu               332 drivers/regulator/bcm590xx-regulator.c 		pmu->desc[i].owner = THIS_MODULE;
pmu               335 drivers/regulator/bcm590xx-regulator.c 		config.driver_data = pmu;
pmu               341 drivers/regulator/bcm590xx-regulator.c 		rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
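
In bcm590xx-regulator.c, pmu is driver state holding an array of regulator descriptors that the probe loop fills field by field before handing each one to devm_regulator_register(). A single-descriptor sketch of the same pattern; the rail name, register offsets, and my_* identifiers are made up, while the desc fields and the registration call are the real regulator-core API:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops my_ldo_ops = {
	/* per-chip .enable/.disable/.list_voltage/... callbacks */
};

static int my_reg_probe(struct platform_device *pdev)
{
	struct regulator_config config = { .dev = &pdev->dev };
	struct regulator_desc *desc;
	struct regulator_dev *rdev;

	desc = devm_kzalloc(&pdev->dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	desc->name        = "my-ldo1";	/* hypothetical rail */
	desc->id          = 0;
	desc->ops         = &my_ldo_ops;
	desc->type        = REGULATOR_VOLTAGE;
	desc->owner       = THIS_MODULE;
	desc->enable_reg  = 0x10;	/* hypothetical register layout */
	desc->enable_mask = BIT(0);
	desc->vsel_reg    = 0x11;
	desc->vsel_mask   = 0x3f;

	rdev = devm_regulator_register(&pdev->dev, desc, &config);
	return PTR_ERR_OR_ZERO(rdev);
}
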
pmu              1477 drivers/scsi/arcmsr/arcmsr_hba.c 		struct MessageUnit_D  *pmu = acb->pmuD;
pmu              1486 drivers/scsi/arcmsr/arcmsr_hba.c 				pmu->done_qbuffer[0].addressLow + 1;
pmu              1487 drivers/scsi/arcmsr/arcmsr_hba.c 			doneq_index = pmu->doneq_index;
pmu              1493 drivers/scsi/arcmsr/arcmsr_hba.c 				pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
pmu              1495 drivers/scsi/arcmsr/arcmsr_hba.c 				doneq_index = pmu->doneq_index;
pmu              1497 drivers/scsi/arcmsr/arcmsr_hba.c 				cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
pmu              1499 drivers/scsi/arcmsr/arcmsr_hba.c 				addressLow = pmu->done_qbuffer[doneq_index &
pmu              1513 drivers/scsi/arcmsr/arcmsr_hba.c 					pmu->outboundlist_read_pointer);
pmu              1519 drivers/scsi/arcmsr/arcmsr_hba.c 		pmu->postq_index = 0;
pmu              1520 drivers/scsi/arcmsr/arcmsr_hba.c 		pmu->doneq_index = 0x40FF;
pmu              1826 drivers/scsi/arcmsr/arcmsr_hba.c 		struct MessageUnit_D  *pmu = acb->pmuD;
pmu              1833 drivers/scsi/arcmsr/arcmsr_hba.c 		postq_index = pmu->postq_index;
pmu              1834 drivers/scsi/arcmsr/arcmsr_hba.c 		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
pmu              1842 drivers/scsi/arcmsr/arcmsr_hba.c 		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
pmu              1844 drivers/scsi/arcmsr/arcmsr_hba.c 		writel(postq_index, pmu->inboundlist_write_pointer);
pmu              1849 drivers/scsi/arcmsr/arcmsr_hba.c 		struct MessageUnit_E __iomem *pmu = acb->pmuE;
pmu              1854 drivers/scsi/arcmsr/arcmsr_hba.c 		writel(0, &pmu->inbound_queueport_high);
pmu              1855 drivers/scsi/arcmsr/arcmsr_hba.c 		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
pmu              2321 drivers/scsi/arcmsr/arcmsr_hba.c 	struct MessageUnit_D  *pmu = pACB->pmuD;
pmu              2323 drivers/scsi/arcmsr/arcmsr_hba.c 	outbound_doorbell = readl(pmu->outbound_doorbell);
pmu              2325 drivers/scsi/arcmsr/arcmsr_hba.c 		writel(outbound_doorbell, pmu->outbound_doorbell);
pmu              2332 drivers/scsi/arcmsr/arcmsr_hba.c 		outbound_doorbell = readl(pmu->outbound_doorbell);
pmu              2450 drivers/scsi/arcmsr/arcmsr_hba.c 	struct MessageUnit_D  *pmu;
pmu              2456 drivers/scsi/arcmsr/arcmsr_hba.c 	pmu = acb->pmuD;
pmu              2457 drivers/scsi/arcmsr/arcmsr_hba.c 	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
pmu              2458 drivers/scsi/arcmsr/arcmsr_hba.c 	doneq_index = pmu->doneq_index;
pmu              2464 drivers/scsi/arcmsr/arcmsr_hba.c 			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
pmu              2466 drivers/scsi/arcmsr/arcmsr_hba.c 			doneq_index = pmu->doneq_index;
pmu              2467 drivers/scsi/arcmsr/arcmsr_hba.c 			cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
pmu              2469 drivers/scsi/arcmsr/arcmsr_hba.c 			addressLow = pmu->done_qbuffer[doneq_index &
pmu              2481 drivers/scsi/arcmsr/arcmsr_hba.c 			writel(doneq_index, pmu->outboundlist_read_pointer);
pmu              2486 drivers/scsi/arcmsr/arcmsr_hba.c 		pmu->outboundlist_interrupt_cause);
pmu              2487 drivers/scsi/arcmsr/arcmsr_hba.c 	readl(pmu->outboundlist_interrupt_cause);
pmu              2496 drivers/scsi/arcmsr/arcmsr_hba.c 	struct MessageUnit_E __iomem *pmu;
pmu              2502 drivers/scsi/arcmsr/arcmsr_hba.c 	pmu = acb->pmuE;
pmu              2503 drivers/scsi/arcmsr/arcmsr_hba.c 	while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
pmu              2514 drivers/scsi/arcmsr/arcmsr_hba.c 	writel(doneq_index, &pmu->reply_post_consumer_index);
pmu              2661 drivers/scsi/arcmsr/arcmsr_hba.c 	struct MessageUnit_D  *pmu = pACB->pmuD;
pmu              2663 drivers/scsi/arcmsr/arcmsr_hba.c 	host_interrupt_status = readl(pmu->host_int_status) &
pmu              2676 drivers/scsi/arcmsr/arcmsr_hba.c 		host_interrupt_status = readl(pmu->host_int_status);
pmu              2686 drivers/scsi/arcmsr/arcmsr_hba.c 	struct MessageUnit_E __iomem *pmu = pACB->pmuE;
pmu              2688 drivers/scsi/arcmsr/arcmsr_hba.c 	host_interrupt_status = readl(&pmu->host_int_status) &
pmu              2702 drivers/scsi/arcmsr/arcmsr_hba.c 		host_interrupt_status = readl(&pmu->host_int_status);
pmu              3506 drivers/scsi/arcmsr/arcmsr_hba.c 	struct MessageUnit_D *pmu = acb->pmuD;
pmu              3512 drivers/scsi/arcmsr/arcmsr_hba.c 		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
pmu              3513 drivers/scsi/arcmsr/arcmsr_hba.c 		doneq_index = pmu->doneq_index;
pmu              3531 drivers/scsi/arcmsr/arcmsr_hba.c 		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
pmu              3533 drivers/scsi/arcmsr/arcmsr_hba.c 		doneq_index = pmu->doneq_index;
pmu              3535 drivers/scsi/arcmsr/arcmsr_hba.c 		cdb_phy_hipart = pmu->done_qbuffer[doneq_index &
pmu              3537 drivers/scsi/arcmsr/arcmsr_hba.c 		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
pmu              4060 drivers/scsi/arcmsr/arcmsr_hba.c 	struct MessageUnit_D *pmu = pACB->pmuD;
pmu              4063 drivers/scsi/arcmsr/arcmsr_hba.c 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
pmu              4072 drivers/scsi/arcmsr/arcmsr_hba.c 	struct MessageUnit_E __iomem *pmu = pACB->pmuE;
pmu              4075 drivers/scsi/arcmsr/arcmsr_hba.c 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
pmu              4077 drivers/scsi/arcmsr/arcmsr_hba.c 	writel(pACB->out_doorbell, &pmu->iobound_doorbell);
pmu                50 drivers/soc/dove/pmu.c 	struct pmu_data *pmu = rcdev_to_pmu(rc);
pmu                54 drivers/soc/dove/pmu.c 	spin_lock_irqsave(&pmu->lock, flags);
pmu                55 drivers/soc/dove/pmu.c 	val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
pmu                56 drivers/soc/dove/pmu.c 	writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
pmu                57 drivers/soc/dove/pmu.c 	writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
pmu                58 drivers/soc/dove/pmu.c 	spin_unlock_irqrestore(&pmu->lock, flags);
pmu                65 drivers/soc/dove/pmu.c 	struct pmu_data *pmu = rcdev_to_pmu(rc);
pmu                69 drivers/soc/dove/pmu.c 	spin_lock_irqsave(&pmu->lock, flags);
pmu                70 drivers/soc/dove/pmu.c 	val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
pmu                71 drivers/soc/dove/pmu.c 	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
pmu                72 drivers/soc/dove/pmu.c 	spin_unlock_irqrestore(&pmu->lock, flags);
pmu                79 drivers/soc/dove/pmu.c 	struct pmu_data *pmu = rcdev_to_pmu(rc);
pmu                83 drivers/soc/dove/pmu.c 	spin_lock_irqsave(&pmu->lock, flags);
pmu                84 drivers/soc/dove/pmu.c 	val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
pmu                85 drivers/soc/dove/pmu.c 	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
pmu                86 drivers/soc/dove/pmu.c 	spin_unlock_irqrestore(&pmu->lock, flags);
pmu               103 drivers/soc/dove/pmu.c static void __init pmu_reset_init(struct pmu_data *pmu)
pmu               107 drivers/soc/dove/pmu.c 	pmu->reset = pmu_reset;
pmu               108 drivers/soc/dove/pmu.c 	pmu->reset.of_node = pmu->of_node;
pmu               110 drivers/soc/dove/pmu.c 	ret = reset_controller_register(&pmu->reset);
pmu               115 drivers/soc/dove/pmu.c static void __init pmu_reset_init(struct pmu_data *pmu)
pmu               121 drivers/soc/dove/pmu.c 	struct pmu_data *pmu;
pmu               146 drivers/soc/dove/pmu.c 	struct pmu_data *pmu = pmu_dom->pmu;
pmu               149 drivers/soc/dove/pmu.c 	void __iomem *pmu_base = pmu->pmu_base;
pmu               150 drivers/soc/dove/pmu.c 	void __iomem *pmc_base = pmu->pmc_base;
pmu               152 drivers/soc/dove/pmu.c 	spin_lock_irqsave(&pmu->lock, flags);
pmu               172 drivers/soc/dove/pmu.c 	spin_unlock_irqrestore(&pmu->lock, flags);
pmu               180 drivers/soc/dove/pmu.c 	struct pmu_data *pmu = pmu_dom->pmu;
pmu               183 drivers/soc/dove/pmu.c 	void __iomem *pmu_base = pmu->pmu_base;
pmu               184 drivers/soc/dove/pmu.c 	void __iomem *pmc_base = pmu->pmc_base;
pmu               186 drivers/soc/dove/pmu.c 	spin_lock_irqsave(&pmu->lock, flags);
pmu               206 drivers/soc/dove/pmu.c 	spin_unlock_irqrestore(&pmu->lock, flags);
pmu               214 drivers/soc/dove/pmu.c 	unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);
pmu               228 drivers/soc/dove/pmu.c 	struct pmu_data *pmu = irq_desc_get_handler_data(desc);
pmu               229 drivers/soc/dove/pmu.c 	struct irq_chip_generic *gc = pmu->irq_gc;
pmu               230 drivers/soc/dove/pmu.c 	struct irq_domain *domain = pmu->irq_domain;
pmu               266 drivers/soc/dove/pmu.c static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
pmu               274 drivers/soc/dove/pmu.c 	writel(0, pmu->pmc_base + PMC_IRQ_MASK);
pmu               275 drivers/soc/dove/pmu.c 	writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
pmu               277 drivers/soc/dove/pmu.c 	domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
pmu               295 drivers/soc/dove/pmu.c 	gc->reg_base = pmu->pmc_base;
pmu               300 drivers/soc/dove/pmu.c 	pmu->irq_domain = domain;
pmu               301 drivers/soc/dove/pmu.c 	pmu->irq_gc = gc;
pmu               303 drivers/soc/dove/pmu.c 	irq_set_handler_data(irq, pmu);
pmu               312 drivers/soc/dove/pmu.c 	struct pmu_data *pmu;
pmu               315 drivers/soc/dove/pmu.c 	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
pmu               316 drivers/soc/dove/pmu.c 	if (!pmu)
pmu               319 drivers/soc/dove/pmu.c 	spin_lock_init(&pmu->lock);
pmu               320 drivers/soc/dove/pmu.c 	pmu->pmc_base = initdata->pmc_base;
pmu               321 drivers/soc/dove/pmu.c 	pmu->pmu_base = initdata->pmu_base;
pmu               323 drivers/soc/dove/pmu.c 	pmu_reset_init(pmu);
pmu               330 drivers/soc/dove/pmu.c 			domain->pmu = pmu;
pmu               340 drivers/soc/dove/pmu.c 	ret = dove_init_pmu_irq(pmu, initdata->irq);
pmu               344 drivers/soc/dove/pmu.c 	if (pmu->irq_domain)
pmu               345 drivers/soc/dove/pmu.c 		irq_domain_associate_many(pmu->irq_domain,
pmu               376 drivers/soc/dove/pmu.c 	struct pmu_data *pmu;
pmu               390 drivers/soc/dove/pmu.c 	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
pmu               391 drivers/soc/dove/pmu.c 	if (!pmu)
pmu               394 drivers/soc/dove/pmu.c 	spin_lock_init(&pmu->lock);
pmu               395 drivers/soc/dove/pmu.c 	pmu->of_node = np_pmu;
pmu               396 drivers/soc/dove/pmu.c 	pmu->pmc_base = of_iomap(pmu->of_node, 0);
pmu               397 drivers/soc/dove/pmu.c 	pmu->pmu_base = of_iomap(pmu->of_node, 1);
pmu               398 drivers/soc/dove/pmu.c 	if (!pmu->pmc_base || !pmu->pmu_base) {
pmu               400 drivers/soc/dove/pmu.c 		iounmap(pmu->pmu_base);
pmu               401 drivers/soc/dove/pmu.c 		iounmap(pmu->pmc_base);
pmu               402 drivers/soc/dove/pmu.c 		kfree(pmu);
pmu               406 drivers/soc/dove/pmu.c 	pmu_reset_init(pmu);
pmu               416 drivers/soc/dove/pmu.c 		domain->pmu = pmu;
pmu               436 drivers/soc/dove/pmu.c 			if (args.np == pmu->of_node)
pmu               445 drivers/soc/dove/pmu.c 	parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
pmu               449 drivers/soc/dove/pmu.c 		ret = dove_init_pmu_irq(pmu, parent_irq);
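
The Dove pmu above is a power-management unit driver that layers a reset controller, an IRQ domain, and power domains over two register windows. Its pmu_reset_init() path (pmu.c:103-110) reduces to the stock reset-controller registration sketched below, with hypothetical my_* names and register mapping; struct reset_control_ops and reset_controller_register() are the real <linux/reset-controller.h> API:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/reset-controller.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_pmu_lock);
static void __iomem *my_sw_rst;		/* hypothetical PMC_SW_RST mapping */

static int my_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
	unsigned long flags;
	u32 val;

	/* Active-low: clear the line's bit under the lock (cf. pmu.c:65-72). */
	spin_lock_irqsave(&my_pmu_lock, flags);
	val = readl_relaxed(my_sw_rst);
	writel_relaxed(val & ~BIT(id), my_sw_rst);
	spin_unlock_irqrestore(&my_pmu_lock, flags);
	return 0;
}

static const struct reset_control_ops my_reset_ops = {
	.assert = my_reset_assert,
	/* .deassert would set the bit back (cf. pmu.c:79-86) */
};

static struct reset_controller_dev my_reset = {
	.ops = &my_reset_ops,
	.nr_resets = 32,		/* hypothetical number of lines */
};

/* In init, as pmu_reset_init() does at pmu.c:110: */
/*	ret = reset_controller_register(&my_reset); */
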
pmu                69 drivers/soc/rockchip/pm_domains.c 	struct rockchip_pmu *pmu;
pmu               136 drivers/soc/rockchip/pm_domains.c 	struct rockchip_pmu *pmu = pd->pmu;
pmu               140 drivers/soc/rockchip/pm_domains.c 	regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
pmu               144 drivers/soc/rockchip/pm_domains.c static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
pmu               148 drivers/soc/rockchip/pm_domains.c 	regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
pmu               157 drivers/soc/rockchip/pm_domains.c 	struct rockchip_pmu *pmu = pd->pmu;
pmu               166 drivers/soc/rockchip/pm_domains.c 		regmap_write(pmu->regmap, pmu->info->req_offset,
pmu               170 drivers/soc/rockchip/pm_domains.c 		regmap_update_bits(pmu->regmap, pmu->info->req_offset,
pmu               177 drivers/soc/rockchip/pm_domains.c 	ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
pmu               181 drivers/soc/rockchip/pm_domains.c 		dev_err(pmu->dev,
pmu               190 drivers/soc/rockchip/pm_domains.c 		dev_err(pmu->dev,
pmu               250 drivers/soc/rockchip/pm_domains.c 	struct rockchip_pmu *pmu = pd->pmu;
pmu               257 drivers/soc/rockchip/pm_domains.c 	regmap_read(pmu->regmap, pmu->info->status_offset, &val);
pmu               266 drivers/soc/rockchip/pm_domains.c 	struct rockchip_pmu *pmu = pd->pmu;
pmu               273 drivers/soc/rockchip/pm_domains.c 		regmap_write(pmu->regmap, pmu->info->pwr_offset,
pmu               277 drivers/soc/rockchip/pm_domains.c 		regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
pmu               284 drivers/soc/rockchip/pm_domains.c 		dev_err(pmu->dev,
pmu               293 drivers/soc/rockchip/pm_domains.c 	struct rockchip_pmu *pmu = pd->pmu;
pmu               296 drivers/soc/rockchip/pm_domains.c 	mutex_lock(&pmu->mutex);
pmu               301 drivers/soc/rockchip/pm_domains.c 			dev_err(pmu->dev, "failed to enable clocks\n");
pmu               302 drivers/soc/rockchip/pm_domains.c 			mutex_unlock(&pmu->mutex);
pmu               325 drivers/soc/rockchip/pm_domains.c 	mutex_unlock(&pmu->mutex);
pmu               381 drivers/soc/rockchip/pm_domains.c static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pmu               393 drivers/soc/rockchip/pm_domains.c 		dev_err(pmu->dev,
pmu               399 drivers/soc/rockchip/pm_domains.c 	if (id >= pmu->info->num_domains) {
pmu               400 drivers/soc/rockchip/pm_domains.c 		dev_err(pmu->dev, "%pOFn: invalid domain id %d\n",
pmu               405 drivers/soc/rockchip/pm_domains.c 	pd_info = &pmu->info->domain_info[id];
pmu               407 drivers/soc/rockchip/pm_domains.c 		dev_err(pmu->dev, "%pOFn: undefined domain id %d\n",
pmu               412 drivers/soc/rockchip/pm_domains.c 	pd = devm_kzalloc(pmu->dev, sizeof(*pd), GFP_KERNEL);
pmu               417 drivers/soc/rockchip/pm_domains.c 	pd->pmu = pmu;
pmu               421 drivers/soc/rockchip/pm_domains.c 		pd->clks = devm_kcalloc(pmu->dev, pd->num_clks,
pmu               426 drivers/soc/rockchip/pm_domains.c 		dev_dbg(pmu->dev, "%pOFn: doesn't have clocks: %d\n",
pmu               435 drivers/soc/rockchip/pm_domains.c 			dev_err(pmu->dev,
pmu               450 drivers/soc/rockchip/pm_domains.c 		pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
pmu               459 drivers/soc/rockchip/pm_domains.c 			pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
pmu               487 drivers/soc/rockchip/pm_domains.c 		dev_err(pmu->dev,
pmu               503 drivers/soc/rockchip/pm_domains.c 	pmu->genpd_data.domains[id] = &pd->genpd;
pmu               523 drivers/soc/rockchip/pm_domains.c 		dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n",
pmu               530 drivers/soc/rockchip/pm_domains.c 	mutex_lock(&pd->pmu->mutex);
pmu               532 drivers/soc/rockchip/pm_domains.c 	mutex_unlock(&pd->pmu->mutex);
pmu               537 drivers/soc/rockchip/pm_domains.c static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
pmu               543 drivers/soc/rockchip/pm_domains.c 	for (i = 0; i < pmu->genpd_data.num_domains; i++) {
pmu               544 drivers/soc/rockchip/pm_domains.c 		genpd = pmu->genpd_data.domains[i];
pmu               554 drivers/soc/rockchip/pm_domains.c static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
pmu               559 drivers/soc/rockchip/pm_domains.c 	regmap_write(pmu->regmap, domain_reg_offset, count);
pmu               561 drivers/soc/rockchip/pm_domains.c 	regmap_write(pmu->regmap, domain_reg_offset + 4, count);
pmu               564 drivers/soc/rockchip/pm_domains.c static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
pmu               576 drivers/soc/rockchip/pm_domains.c 			dev_err(pmu->dev,
pmu               581 drivers/soc/rockchip/pm_domains.c 		parent_domain = pmu->genpd_data.domains[idx];
pmu               583 drivers/soc/rockchip/pm_domains.c 		error = rockchip_pm_add_one_domain(pmu, np);
pmu               585 drivers/soc/rockchip/pm_domains.c 			dev_err(pmu->dev, "failed to handle node %pOFn: %d\n",
pmu               592 drivers/soc/rockchip/pm_domains.c 			dev_err(pmu->dev,
pmu               597 drivers/soc/rockchip/pm_domains.c 		child_domain = pmu->genpd_data.domains[idx];
pmu               601 drivers/soc/rockchip/pm_domains.c 			dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
pmu               605 drivers/soc/rockchip/pm_domains.c 			dev_dbg(pmu->dev, "%s add subdomain: %s\n",
pmu               609 drivers/soc/rockchip/pm_domains.c 		rockchip_pm_add_subdomain(pmu, np);
pmu               625 drivers/soc/rockchip/pm_domains.c 	struct rockchip_pmu *pmu;
pmu               643 drivers/soc/rockchip/pm_domains.c 	pmu = devm_kzalloc(dev,
pmu               644 drivers/soc/rockchip/pm_domains.c 			   struct_size(pmu, domains, pmu_info->num_domains),
pmu               646 drivers/soc/rockchip/pm_domains.c 	if (!pmu)
pmu               649 drivers/soc/rockchip/pm_domains.c 	pmu->dev = &pdev->dev;
pmu               650 drivers/soc/rockchip/pm_domains.c 	mutex_init(&pmu->mutex);
pmu               652 drivers/soc/rockchip/pm_domains.c 	pmu->info = pmu_info;
pmu               654 drivers/soc/rockchip/pm_domains.c 	pmu->genpd_data.domains = pmu->domains;
pmu               655 drivers/soc/rockchip/pm_domains.c 	pmu->genpd_data.num_domains = pmu_info->num_domains;
pmu               663 drivers/soc/rockchip/pm_domains.c 	pmu->regmap = syscon_node_to_regmap(parent->of_node);
pmu               664 drivers/soc/rockchip/pm_domains.c 	if (IS_ERR(pmu->regmap)) {
pmu               666 drivers/soc/rockchip/pm_domains.c 		return PTR_ERR(pmu->regmap);
pmu               674 drivers/soc/rockchip/pm_domains.c 		rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
pmu               677 drivers/soc/rockchip/pm_domains.c 		rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
pmu               683 drivers/soc/rockchip/pm_domains.c 		error = rockchip_pm_add_one_domain(pmu, node);
pmu               691 drivers/soc/rockchip/pm_domains.c 		error = rockchip_pm_add_subdomain(pmu, node);
pmu               705 drivers/soc/rockchip/pm_domains.c 	error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
pmu               714 drivers/soc/rockchip/pm_domains.c 	rockchip_pm_domain_cleanup(pmu);
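
The Rockchip driver exposes each pmu power domain through the generic power-domain framework, ending in of_genpd_add_provider_onecell() (pm_domains.c:705). A skeletal provider with one hypothetical domain shows the shape it builds up; only pm_genpd_init() and the onecell provider call are the real genpd API:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pm_domain.h>

static int my_domain_power_on(struct generic_pm_domain *genpd)
{
	/* write the request bits, then poll the ack (cf. pm_domains.c:177) */
	return 0;
}

static int my_domain_power_off(struct generic_pm_domain *genpd)
{
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name      = "my_pd",		/* hypothetical domain */
	.power_on  = my_domain_power_on,
	.power_off = my_domain_power_off,
};

static struct generic_pm_domain *my_domains[] = { &my_pd };

static struct genpd_onecell_data my_genpd_data = {
	.domains     = my_domains,
	.num_domains = ARRAY_SIZE(my_domains),
};

/* In probe:                                                      */
/*	pm_genpd_init(&my_pd, NULL, false);  // starts powered on */
/*	of_genpd_add_provider_onecell(np, &my_genpd_data);        */
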
pmu               104 drivers/ssb/driver_chipcommon_pmu.c 	cc->pmu.crystalfreq = e->freq;
pmu               233 drivers/ssb/driver_chipcommon_pmu.c 		cc->pmu.crystalfreq = 20000;
pmu               243 drivers/ssb/driver_chipcommon_pmu.c 	cc->pmu.crystalfreq = e->freq;
pmu               344 drivers/ssb/driver_chipcommon_pmu.c 		if (cc->pmu.rev == 2) {
pmu               525 drivers/ssb/driver_chipcommon_pmu.c 	cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
pmu               528 drivers/ssb/driver_chipcommon_pmu.c 		cc->pmu.rev, pmucap);
pmu               530 drivers/ssb/driver_chipcommon_pmu.c 	if (cc->pmu.rev == 1)
pmu                32 include/kvm/arm_pmu.h #define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
pmu                33 include/kvm/arm_pmu.h #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
pmu               626 include/linux/bcma/bcma_driver_chipcommon.h 	struct bcma_chipcommon_pmu pmu;
pmu               672 include/linux/bcma/bcma_driver_chipcommon.h 	bcma_read32((cc)->pmu.core, offset)
pmu               674 include/linux/bcma/bcma_driver_chipcommon.h 	bcma_write32((cc)->pmu.core, offset, val)
pmu                80 include/linux/perf/arm_pmu.h 	struct pmu	pmu;
pmu                83 include/linux/perf/arm_pmu.h 	irqreturn_t	(*handle_irq)(struct arm_pmu *pmu);
pmu               116 include/linux/perf/arm_pmu.h #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
pmu               165 include/linux/perf/arm_pmu.h void armpmu_free(struct arm_pmu *pmu);
pmu               166 include/linux/perf/arm_pmu.h int armpmu_register(struct arm_pmu *pmu);
pmu               282 include/linux/perf_event.h 	void (*pmu_enable)		(struct pmu *pmu); /* optional */
pmu               283 include/linux/perf_event.h 	void (*pmu_disable)		(struct pmu *pmu); /* optional */
pmu               378 include/linux/perf_event.h 	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
pmu               387 include/linux/perf_event.h 	int  (*commit_txn)		(struct pmu *pmu);
pmu               394 include/linux/perf_event.h 	void (*cancel_txn)		(struct pmu *pmu);
pmu               611 include/linux/perf_event.h 	struct pmu			*pmu;
pmu               740 include/linux/perf_event.h 	struct pmu			*pmu;
pmu               889 include/linux/perf_event.h extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
pmu               890 include/linux/perf_event.h extern void perf_pmu_unregister(struct pmu *pmu);
pmu               906 include/linux/perf_event.h extern void perf_pmu_disable(struct pmu *pmu);
pmu               907 include/linux/perf_event.h extern void perf_pmu_enable(struct pmu *pmu);
pmu               908 include/linux/perf_event.h extern void perf_sched_cb_dec(struct pmu *pmu);
pmu               909 include/linux/perf_event.h extern void perf_sched_cb_inc(struct pmu *pmu);
pmu               913 include/linux/perf_event.h extern void perf_pmu_resched(struct pmu *pmu);
pmu               924 include/linux/perf_event.h extern void perf_pmu_migrate_context(struct pmu *pmu,
pmu              1069 include/linux/perf_event.h 	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
pmu              1072 include/linux/perf_event.h static inline int is_exclusive_pmu(struct pmu *pmu)
pmu              1074 include/linux/perf_event.h 	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
pmu              1287 include/linux/perf_event.h 	return event->pmu->setup_aux;
pmu              1297 include/linux/perf_event.h 	return event->pmu->nr_addr_filters;
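
perf_pmu_disable()/perf_pmu_enable(), declared above (perf_event.h:906-907), nest through a per-cpu counter (see core.c:1136-1147 below), so callers can bracket their own critical sections without tracking whether the PMU is already disabled. A sketch of the caller-side idiom, with a hypothetical helper:

#include <linux/perf_event.h>

static void my_update_event(struct perf_event *event)
{
	perf_pmu_disable(event->pmu);	/* hardware stops on the 0 -> 1 edge */
	/* ... reprogram the counter free of PMI races ... */
	perf_pmu_enable(event->pmu);	/* hardware restarts on the 1 -> 0 edge */
}
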
pmu               594 include/linux/ssb/ssb_driver_chipcommon.h 	struct ssb_chipcommon_pmu pmu;
pmu                92 include/xen/interface/xenpmu.h 	struct xen_pmu_arch pmu;
pmu               153 kernel/events/core.c 	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
pmu               816 kernel/events/core.c 		perf_pmu_disable(cpuctx->ctx.pmu);
pmu               840 kernel/events/core.c 		perf_pmu_enable(cpuctx->ctx.pmu);
pmu              1093 kernel/events/core.c 	struct pmu *pmu = cpuctx->ctx.pmu;
pmu              1097 kernel/events/core.c 	if (pmu->task_ctx_nr == perf_sw_context)
pmu              1104 kernel/events/core.c 	interval = pmu->hrtimer_interval_ms;
pmu              1106 kernel/events/core.c 		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
pmu              1118 kernel/events/core.c 	struct pmu *pmu = cpuctx->ctx.pmu;
pmu              1122 kernel/events/core.c 	if (pmu->task_ctx_nr == perf_sw_context)
pmu              1136 kernel/events/core.c void perf_pmu_disable(struct pmu *pmu)
pmu              1138 kernel/events/core.c 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
pmu              1140 kernel/events/core.c 		pmu->pmu_disable(pmu);
pmu              1143 kernel/events/core.c void perf_pmu_enable(struct pmu *pmu)
pmu              1145 kernel/events/core.c 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
pmu              1147 kernel/events/core.c 		pmu->pmu_enable(pmu);
pmu              1901 kernel/events/core.c 	if (!event->pmu->aux_output_match)
pmu              1904 kernel/events/core.c 	return event->pmu->aux_output_match(aux_event);
pmu              2045 kernel/events/core.c 	struct pmu *pmu = event->pmu;
pmu              2046 kernel/events/core.c 	return pmu->filter_match ? pmu->filter_match(event) : 1;
pmu              2097 kernel/events/core.c 	perf_pmu_disable(event->pmu);
pmu              2099 kernel/events/core.c 	event->pmu->del(event, 0);
pmu              2117 kernel/events/core.c 	perf_pmu_enable(event->pmu);
pmu              2130 kernel/events/core.c 	perf_pmu_disable(ctx->pmu);
pmu              2140 kernel/events/core.c 	perf_pmu_enable(ctx->pmu);
pmu              2366 kernel/events/core.c 	perf_pmu_disable(event->pmu);
pmu              2372 kernel/events/core.c 	if (event->pmu->add(event, PERF_EF_START)) {
pmu              2390 kernel/events/core.c 	perf_pmu_enable(event->pmu);
pmu              2401 kernel/events/core.c 	struct pmu *pmu = ctx->pmu;
pmu              2406 kernel/events/core.c 	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
pmu              2409 kernel/events/core.c 		pmu->cancel_txn(pmu);
pmu              2424 kernel/events/core.c 	if (!pmu->commit_txn(pmu))
pmu              2441 kernel/events/core.c 	pmu->cancel_txn(pmu);
pmu              2551 kernel/events/core.c 	perf_pmu_disable(cpuctx->ctx.pmu);
pmu              2568 kernel/events/core.c 	perf_pmu_enable(cpuctx->ctx.pmu);
pmu              2571 kernel/events/core.c void perf_pmu_resched(struct pmu *pmu)
pmu              2573 kernel/events/core.c 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
pmu              2858 kernel/events/core.c 	event->pmu->stop(event, PERF_EF_UPDATE);
pmu              2870 kernel/events/core.c 		event->pmu->start(event, 0);
pmu              2933 kernel/events/core.c 		event->pmu->addr_filters_sync(event);
pmu              3056 kernel/events/core.c 	perf_pmu_disable(ctx->pmu);
pmu              3066 kernel/events/core.c 	perf_pmu_enable(ctx->pmu);
pmu              3123 kernel/events/core.c 		event->pmu->read(event);
pmu              3246 kernel/events/core.c void perf_sched_cb_dec(struct pmu *pmu)
pmu              3248 kernel/events/core.c 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
pmu              3257 kernel/events/core.c void perf_sched_cb_inc(struct pmu *pmu)
pmu              3259 kernel/events/core.c 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
pmu              3280 kernel/events/core.c 	struct pmu *pmu;
pmu              3286 kernel/events/core.c 		pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
pmu              3288 kernel/events/core.c 		if (WARN_ON_ONCE(!pmu->sched_task))
pmu              3292 kernel/events/core.c 		perf_pmu_disable(pmu);
pmu              3294 kernel/events/core.c 		pmu->sched_task(cpuctx->task_ctx, sched_in);
pmu              3296 kernel/events/core.c 		perf_pmu_enable(pmu);
pmu              3534 kernel/events/core.c 	perf_pmu_disable(ctx->pmu);
pmu              3546 kernel/events/core.c 	perf_pmu_enable(ctx->pmu);
pmu              3690 kernel/events/core.c 			event->pmu->stop(event, PERF_EF_UPDATE);
pmu              3695 kernel/events/core.c 			event->pmu->start(event, PERF_EF_RELOAD);
pmu              3721 kernel/events/core.c 	perf_pmu_disable(ctx->pmu);
pmu              3730 kernel/events/core.c 		perf_pmu_disable(event->pmu);
pmu              3737 kernel/events/core.c 			event->pmu->start(event, 0);
pmu              3746 kernel/events/core.c 		event->pmu->stop(event, PERF_EF_UPDATE);
pmu              3762 kernel/events/core.c 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
pmu              3764 kernel/events/core.c 		perf_pmu_enable(event->pmu);
pmu              3767 kernel/events/core.c 	perf_pmu_enable(ctx->pmu);
pmu              3825 kernel/events/core.c 	perf_pmu_disable(cpuctx->ctx.pmu);
pmu              3848 kernel/events/core.c 	perf_pmu_enable(cpuctx->ctx.pmu);
pmu              3961 kernel/events/core.c 	struct pmu *pmu = event->pmu;
pmu              3987 kernel/events/core.c 		pmu->read(event);
pmu              3992 kernel/events/core.c 	pmu->start_txn(pmu, PERF_PMU_TXN_READ);
pmu              3994 kernel/events/core.c 	pmu->read(event);
pmu              4002 kernel/events/core.c 			sub->pmu->read(sub);
pmu              4006 kernel/events/core.c 	data->ret = pmu->commit_txn(pmu);
pmu              4072 kernel/events/core.c 		event->pmu->read(event);
pmu              4185 kernel/events/core.c alloc_perf_context(struct pmu *pmu, struct task_struct *task)
pmu              4196 kernel/events/core.c 	ctx->pmu = pmu;
pmu              4225 kernel/events/core.c find_get_context(struct pmu *pmu, struct task_struct *task,
pmu              4240 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
pmu              4249 kernel/events/core.c 	ctxn = pmu->task_ctx_nr;
pmu              4254 kernel/events/core.c 		task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
pmu              4276 kernel/events/core.c 		ctx = alloc_perf_context(pmu, task);
pmu              4466 kernel/events/core.c 	struct pmu *pmu = event->pmu;
pmu              4468 kernel/events/core.c 	if (!is_exclusive_pmu(pmu))
pmu              4485 kernel/events/core.c 		if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
pmu              4488 kernel/events/core.c 		if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
pmu              4497 kernel/events/core.c 	struct pmu *pmu = event->pmu;
pmu              4499 kernel/events/core.c 	if (!is_exclusive_pmu(pmu))
pmu              4504 kernel/events/core.c 		atomic_dec(&pmu->exclusive_cnt);
pmu              4506 kernel/events/core.c 		atomic_inc(&pmu->exclusive_cnt);
pmu              4511 kernel/events/core.c 	if ((e1->pmu == e2->pmu) &&
pmu              4523 kernel/events/core.c 	struct pmu *pmu = event->pmu;
pmu              4527 kernel/events/core.c 	if (!is_exclusive_pmu(pmu))
pmu              4589 kernel/events/core.c 	module_put(event->pmu->module);
pmu              5089 kernel/events/core.c 		perf_pmu_disable(ctx->pmu);
pmu              5098 kernel/events/core.c 		event->pmu->stop(event, PERF_EF_UPDATE);
pmu              5104 kernel/events/core.c 		event->pmu->start(event, PERF_EF_RELOAD);
pmu              5105 kernel/events/core.c 		perf_pmu_enable(ctx->pmu);
pmu              5111 kernel/events/core.c 	return event->pmu->check_period(event, value);
pmu              5333 kernel/events/core.c 	return event->pmu->event_idx(event);
pmu              5573 kernel/events/core.c 	if (event->pmu->event_mapped)
pmu              5574 kernel/events/core.c 		event->pmu->event_mapped(event, vma->vm_mm);
pmu              5596 kernel/events/core.c 	if (event->pmu->event_unmapped)
pmu              5597 kernel/events/core.c 		event->pmu->event_unmapped(event, vma->vm_mm);
pmu              5915 kernel/events/core.c 	if (event->pmu->event_mapped)
pmu              5916 kernel/events/core.c 		event->pmu->event_mapped(event, vma->vm_mm);
pmu              6302 kernel/events/core.c 		leader->pmu->read(leader);
pmu              6315 kernel/events/core.c 			sub->pmu->read(sub);
pmu              6966 kernel/events/core.c 	struct pmu *pmu = event->ctx->pmu;
pmu              6967 kernel/events/core.c 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
pmu              8115 kernel/events/core.c 	if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
pmu              8658 kernel/events/core.c static struct pmu perf_swevent = {
pmu              8804 kernel/events/core.c static struct pmu perf_tracepoint = {
pmu              8856 kernel/events/core.c static struct pmu perf_kprobe = {
pmu              8915 kernel/events/core.c static struct pmu perf_uprobe = {
pmu              9047 kernel/events/core.c 	if (event->pmu == &perf_tracepoint)
pmu              9050 kernel/events/core.c 	if (event->pmu == &perf_kprobe)
pmu              9054 kernel/events/core.c 	if (event->pmu == &perf_uprobe)
pmu              9516 kernel/events/core.c 	ret = event->pmu->addr_filters_validate(&filters);
pmu              9590 kernel/events/core.c 	event->pmu->read(event);
pmu              9729 kernel/events/core.c static struct pmu perf_cpu_clock = {
pmu              9810 kernel/events/core.c static struct pmu perf_task_clock = {
pmu              9823 kernel/events/core.c static void perf_pmu_nop_void(struct pmu *pmu)
pmu              9827 kernel/events/core.c static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
pmu              9831 kernel/events/core.c static int perf_pmu_nop_int(struct pmu *pmu)
pmu              9843 kernel/events/core.c static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
pmu              9850 kernel/events/core.c 	perf_pmu_disable(pmu);
pmu              9853 kernel/events/core.c static int perf_pmu_commit_txn(struct pmu *pmu)
pmu              9862 kernel/events/core.c 	perf_pmu_enable(pmu);
pmu              9866 kernel/events/core.c static void perf_pmu_cancel_txn(struct pmu *pmu)
pmu              9875 kernel/events/core.c 	perf_pmu_enable(pmu);
pmu              9889 kernel/events/core.c 	struct pmu *pmu;
pmu              9894 kernel/events/core.c 	list_for_each_entry(pmu, &pmus, entry) {
pmu              9895 kernel/events/core.c 		if (pmu->task_ctx_nr == ctxn)
pmu              9896 kernel/events/core.c 			return pmu->pmu_cpu_context;
pmu              9902 kernel/events/core.c static void free_pmu_context(struct pmu *pmu)
pmu              9909 kernel/events/core.c 	if (pmu->task_ctx_nr > perf_invalid_context)
pmu              9912 kernel/events/core.c 	free_percpu(pmu->pmu_cpu_context);
pmu              9922 kernel/events/core.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu              9924 kernel/events/core.c 	return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
pmu              9933 kernel/events/core.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu              9935 kernel/events/core.c 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
pmu              9944 kernel/events/core.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu              9946 kernel/events/core.c 	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
pmu              9956 kernel/events/core.c 	struct pmu *pmu = dev_get_drvdata(dev);
pmu              9967 kernel/events/core.c 	if (timer == pmu->hrtimer_interval_ms)
pmu              9971 kernel/events/core.c 	pmu->hrtimer_interval_ms = timer;
pmu              9977 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
pmu              10008 kernel/events/core.c static int pmu_dev_alloc(struct pmu *pmu)
pmu              10012 kernel/events/core.c 	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
pmu              10013 kernel/events/core.c 	if (!pmu->dev)
pmu              10016 kernel/events/core.c 	pmu->dev->groups = pmu->attr_groups;
pmu              10017 kernel/events/core.c 	device_initialize(pmu->dev);
pmu              10018 kernel/events/core.c 	ret = dev_set_name(pmu->dev, "%s", pmu->name);
pmu              10022 kernel/events/core.c 	dev_set_drvdata(pmu->dev, pmu);
pmu              10023 kernel/events/core.c 	pmu->dev->bus = &pmu_bus;
pmu              10024 kernel/events/core.c 	pmu->dev->release = pmu_dev_release;
pmu              10025 kernel/events/core.c 	ret = device_add(pmu->dev);
pmu              10030 kernel/events/core.c 	if (pmu->nr_addr_filters)
pmu              10031 kernel/events/core.c 		ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
pmu              10036 kernel/events/core.c 	if (pmu->attr_update)
pmu              10037 kernel/events/core.c 		ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
pmu              10046 kernel/events/core.c 	device_del(pmu->dev);
pmu              10049 kernel/events/core.c 	put_device(pmu->dev);
pmu              10056 kernel/events/core.c int perf_pmu_register(struct pmu *pmu, const char *name, int type)
pmu              10062 kernel/events/core.c 	pmu->pmu_disable_count = alloc_percpu(int);
pmu              10063 kernel/events/core.c 	if (!pmu->pmu_disable_count)
pmu              10066 kernel/events/core.c 	pmu->type = -1;
pmu              10069 kernel/events/core.c 	pmu->name = name;
pmu              10072 kernel/events/core.c 		type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
pmu              10078 kernel/events/core.c 	pmu->type = type;
pmu              10081 kernel/events/core.c 		ret = pmu_dev_alloc(pmu);
pmu              10087 kernel/events/core.c 	if (pmu->task_ctx_nr == perf_hw_context) {
pmu              10096 kernel/events/core.c 		    !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
pmu              10097 kernel/events/core.c 			pmu->task_ctx_nr = perf_invalid_context;
pmu              10102 kernel/events/core.c 	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
pmu              10103 kernel/events/core.c 	if (pmu->pmu_cpu_context)
pmu              10107 kernel/events/core.c 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
pmu              10108 kernel/events/core.c 	if (!pmu->pmu_cpu_context)
pmu              10114 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
pmu              10118 kernel/events/core.c 		cpuctx->ctx.pmu = pmu;
pmu              10125 kernel/events/core.c 	if (!pmu->start_txn) {
pmu              10126 kernel/events/core.c 		if (pmu->pmu_enable) {
pmu              10132 kernel/events/core.c 			pmu->start_txn  = perf_pmu_start_txn;
pmu              10133 kernel/events/core.c 			pmu->commit_txn = perf_pmu_commit_txn;
pmu              10134 kernel/events/core.c 			pmu->cancel_txn = perf_pmu_cancel_txn;
pmu              10136 kernel/events/core.c 			pmu->start_txn  = perf_pmu_nop_txn;
pmu              10137 kernel/events/core.c 			pmu->commit_txn = perf_pmu_nop_int;
pmu              10138 kernel/events/core.c 			pmu->cancel_txn = perf_pmu_nop_void;
pmu              10142 kernel/events/core.c 	if (!pmu->pmu_enable) {
pmu              10143 kernel/events/core.c 		pmu->pmu_enable  = perf_pmu_nop_void;
pmu              10144 kernel/events/core.c 		pmu->pmu_disable = perf_pmu_nop_void;
pmu              10147 kernel/events/core.c 	if (!pmu->check_period)
pmu              10148 kernel/events/core.c 		pmu->check_period = perf_event_nop_int;
pmu              10150 kernel/events/core.c 	if (!pmu->event_idx)
pmu              10151 kernel/events/core.c 		pmu->event_idx = perf_event_idx_default;
pmu              10153 kernel/events/core.c 	list_add_rcu(&pmu->entry, &pmus);
pmu              10154 kernel/events/core.c 	atomic_set(&pmu->exclusive_cnt, 0);
pmu              10162 kernel/events/core.c 	device_del(pmu->dev);
pmu              10163 kernel/events/core.c 	put_device(pmu->dev);
pmu              10166 kernel/events/core.c 	if (pmu->type >= PERF_TYPE_MAX)
pmu              10167 kernel/events/core.c 		idr_remove(&pmu_idr, pmu->type);
pmu              10170 kernel/events/core.c 	free_percpu(pmu->pmu_disable_count);
pmu              10175 kernel/events/core.c void perf_pmu_unregister(struct pmu *pmu)
pmu              10178 kernel/events/core.c 	list_del_rcu(&pmu->entry);
pmu              10187 kernel/events/core.c 	free_percpu(pmu->pmu_disable_count);
pmu              10188 kernel/events/core.c 	if (pmu->type >= PERF_TYPE_MAX)
pmu              10189 kernel/events/core.c 		idr_remove(&pmu_idr, pmu->type);
pmu              10191 kernel/events/core.c 		if (pmu->nr_addr_filters)
pmu              10192 kernel/events/core.c 			device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
pmu              10193 kernel/events/core.c 		device_del(pmu->dev);
pmu              10194 kernel/events/core.c 		put_device(pmu->dev);
pmu              10196 kernel/events/core.c 	free_pmu_context(pmu);
pmu              10207 kernel/events/core.c static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
pmu              10212 kernel/events/core.c 	if (!try_module_get(pmu->module))
pmu              10221 kernel/events/core.c 	if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
pmu              10231 kernel/events/core.c 	event->pmu = pmu;
pmu              10232 kernel/events/core.c 	ret = pmu->event_init(event);
pmu              10238 kernel/events/core.c 		if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
pmu              10242 kernel/events/core.c 		if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
pmu              10251 kernel/events/core.c 		module_put(pmu->module);
pmu              10256 kernel/events/core.c static struct pmu *perf_init_event(struct perf_event *event)
pmu              10258 kernel/events/core.c 	struct pmu *pmu;
pmu              10265 kernel/events/core.c 	if (event->parent && event->parent->pmu) {
pmu              10266 kernel/events/core.c 		pmu = event->parent->pmu;
pmu              10267 kernel/events/core.c 		ret = perf_try_init_event(pmu, event);
pmu              10273 kernel/events/core.c 	pmu = idr_find(&pmu_idr, event->attr.type);
pmu              10275 kernel/events/core.c 	if (pmu) {
pmu              10276 kernel/events/core.c 		ret = perf_try_init_event(pmu, event);
pmu              10278 kernel/events/core.c 			pmu = ERR_PTR(ret);
pmu              10282 kernel/events/core.c 	list_for_each_entry_rcu(pmu, &pmus, entry) {
pmu              10283 kernel/events/core.c 		ret = perf_try_init_event(pmu, event);
pmu              10288 kernel/events/core.c 			pmu = ERR_PTR(ret);
pmu              10292 kernel/events/core.c 	pmu = ERR_PTR(-ENOENT);
pmu              10296 kernel/events/core.c 	return pmu;
pmu              10427 kernel/events/core.c 	struct pmu *pmu;
pmu              10472 kernel/events/core.c 	event->pmu		= NULL;
pmu              10527 kernel/events/core.c 	pmu = NULL;
pmu              10553 kernel/events/core.c 	pmu = perf_init_event(event);
pmu              10554 kernel/events/core.c 	if (IS_ERR(pmu)) {
pmu              10555 kernel/events/core.c 		err = PTR_ERR(pmu);
pmu              10563 kernel/events/core.c 	if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) {
pmu              10569 kernel/events/core.c 	    !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
pmu              10579 kernel/events/core.c 		event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
pmu              10597 kernel/events/core.c 			       pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
pmu              10627 kernel/events/core.c 	module_put(pmu->module);
pmu              10788 kernel/events/core.c 	    event->pmu != output_event->pmu)
pmu              10854 kernel/events/core.c 	if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
pmu              10910 kernel/events/core.c 	struct pmu *pmu;
pmu              11025 kernel/events/core.c 		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
pmu              11035 kernel/events/core.c 	pmu = event->pmu;
pmu              11043 kernel/events/core.c 	if (pmu->task_ctx_nr == perf_sw_context)
pmu              11057 kernel/events/core.c 			pmu = group_leader->ctx->pmu;
pmu              11073 kernel/events/core.c 	ctx = find_get_context(pmu, task, event);
pmu              11377 kernel/events/core.c 	ctx = find_get_context(event->pmu, task, event);
pmu              11427 kernel/events/core.c void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
pmu              11434 kernel/events/core.c 	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
pmu              11435 kernel/events/core.c 	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
pmu              11822 kernel/events/core.c 		struct pmu *pmu = child_event->pmu;
pmu              11824 kernel/events/core.c 		child_ctx->task_ctx_data = kzalloc(pmu->task_ctx_size,
pmu              11971 kernel/events/core.c 		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
pmu              12162 kernel/events/core.c 	struct pmu *pmu;
pmu              12165 kernel/events/core.c 	list_for_each_entry(pmu, &pmus, entry) {
pmu              12166 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
pmu              12187 kernel/events/core.c 	struct pmu *pmu;
pmu              12193 kernel/events/core.c 	list_for_each_entry(pmu, &pmus, entry) {
pmu              12194 kernel/events/core.c 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
pmu              12273 kernel/events/core.c 	struct pmu *pmu;
pmu              12282 kernel/events/core.c 	list_for_each_entry(pmu, &pmus, entry) {
pmu              12283 kernel/events/core.c 		if (!pmu->name || pmu->type < 0)
pmu              12286 kernel/events/core.c 		ret = pmu_dev_alloc(pmu);
pmu              12287 kernel/events/core.c 		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
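
Within kernel/events/core.c, the start_txn/commit_txn/cancel_txn triple visible around core.c:2406-2441 lets group scheduling batch several ->add() calls and test them as a unit. A simplified, hypothetical my_schedule_group() sketch of that protocol (the real group_sched_in() additionally schedules out the events it already added before cancelling):

#include <linux/perf_event.h>

static int my_schedule_group(struct perf_event *leader)
{
	struct pmu *pmu = leader->pmu;
	struct perf_event *sibling;

	/* Open an ADD transaction; nop-filled for PMUs without one. */
	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

	if (pmu->add(leader, PERF_EF_START))
		goto fail;
	for_each_sibling_event(sibling, leader) {
		if (pmu->add(sibling, PERF_EF_START))
			goto fail;
	}

	if (!pmu->commit_txn(pmu))	/* 0: the whole group fits */
		return 0;
fail:
	pmu->cancel_txn(pmu);		/* abort the pending transaction */
	return -EAGAIN;
}
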
pmu               650 kernel/events/hw_breakpoint.c static struct pmu perf_breakpoint = {
pmu               661 kernel/events/ring_buffer.c 	rb->free_aux = event->pmu->free_aux;
pmu               682 kernel/events/ring_buffer.c 	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
pmu               690 kernel/events/ring_buffer.c 	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
pmu               214 tools/perf/arch/arm/util/cs-etm.c static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
pmu               232 tools/perf/arch/arm/util/cs-etm.c 		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
pmu               553 tools/perf/arch/arm/util/cs-etm.c static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
pmu               562 tools/perf/arch/arm/util/cs-etm.c 	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
pmu                16 tools/perf/arch/arm/util/pmu.c *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
pmu                19 tools/perf/arch/arm/util/pmu.c 	if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
pmu                21 tools/perf/arch/arm/util/pmu.c 		pmu->selectable = true;
pmu                23 tools/perf/arch/arm/util/pmu.c 	} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
pmu                24 tools/perf/arch/arm/util/pmu.c 		return arm_spe_pmu_default_config(pmu);
pmu                15 tools/perf/arch/arm64/util/header.c char *get_cpuid_str(struct perf_pmu *pmu)
pmu                25 tools/perf/arch/arm64/util/header.c 	if (!sysfs || !pmu || !pmu->cpus)
pmu                33 tools/perf/arch/arm64/util/header.c 	cpus = perf_cpu_map__get(pmu->cpus);
pmu                61 tools/perf/arch/arm64/util/header.c 		pr_err("failed to get cpuid string for PMU %s\n", pmu->name);
pmu                11 tools/perf/arch/nds32/util/header.c char *get_cpuid_str(struct perf_pmu *pmu)
pmu                18 tools/perf/arch/nds32/util/header.c 	if (!sysfs || !pmu || !pmu->cpus)
pmu                25 tools/perf/arch/nds32/util/header.c 	cpus = cpu_map__get(pmu->cpus);
pmu                38 tools/perf/arch/powerpc/util/header.c get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
pmu               140 tools/perf/arch/s390/util/header.c char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
pmu                72 tools/perf/arch/x86/util/header.c get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
pmu                11 tools/perf/arch/x86/util/pmu.c struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
pmu                14 tools/perf/arch/x86/util/pmu.c 	if (!strcmp(pmu->name, INTEL_PT_PMU_NAME))
pmu                15 tools/perf/arch/x86/util/pmu.c 		return intel_pt_pmu_default_config(pmu);
pmu                16 tools/perf/arch/x86/util/pmu.c 	if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME))
pmu                17 tools/perf/arch/x86/util/pmu.c 		pmu->selectable = true;
pmu               323 tools/perf/pmu-events/jevents.c 				    char *pmu, char *unit, char *perpkg,
pmu               345 tools/perf/pmu-events/jevents.c 	if (pmu)
pmu               346 tools/perf/pmu-events/jevents.c 		fprintf(outfp, "\t.pmu = \"%s\",\n", pmu);
pmu               368 tools/perf/pmu-events/jevents.c 	char *pmu;
pmu               395 tools/perf/pmu-events/jevents.c 	op(pmu);						\
pmu               417 tools/perf/pmu-events/jevents.c 				char *desc, char *long_desc, char *pmu,
pmu               480 tools/perf/pmu-events/jevents.c 	  char **name, char **long_desc, char **pmu, char **filter,
pmu               508 tools/perf/pmu-events/jevents.c 		      char *pmu, char *unit, char *perpkg,
pmu               532 tools/perf/pmu-events/jevents.c 		char *pmu = NULL;
pmu               600 tools/perf/pmu-events/jevents.c 					pmu = strdup(ppmu);
pmu               602 tools/perf/pmu-events/jevents.c 					if (!pmu)
pmu               603 tools/perf/pmu-events/jevents.c 						pmu = strdup("uncore_");
pmu               604 tools/perf/pmu-events/jevents.c 					addfield(map, &pmu, "", "", val);
pmu               605 tools/perf/pmu-events/jevents.c 					for (s = pmu; *s; s++)
pmu               609 tools/perf/pmu-events/jevents.c 				addfield(map, &desc, "", pmu, NULL);
pmu               659 tools/perf/pmu-events/jevents.c 					&long_desc, &pmu, &filter, &perpkg,
pmu               666 tools/perf/pmu-events/jevents.c 			   pmu, unit, perpkg, metric_expr, metric_name, metric_group);
pmu               673 tools/perf/pmu-events/jevents.c 		free(pmu);
pmu                 8 tools/perf/pmu-events/jevents.h 				char *pmu,
pmu                14 tools/perf/pmu-events/pmu-events.h 	const char *pmu;
pmu              2157 tools/perf/util/auxtrace.c 	struct perf_pmu *pmu = NULL;
pmu              2159 tools/perf/util/auxtrace.c 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
pmu              2160 tools/perf/util/auxtrace.c 		if (pmu->type == evsel->core.attr.type)
pmu              2164 tools/perf/util/auxtrace.c 	return pmu;
pmu              2169 tools/perf/util/auxtrace.c 	struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
pmu              2172 tools/perf/util/auxtrace.c 	if (!pmu)
pmu              2175 tools/perf/util/auxtrace.c 	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);
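
The tools/perf hits follow one iterator idiom throughout: perf_pmu__scan(NULL) begins a walk over the sysfs-enumerated PMUs and each call returns the next, while perf_pmu__scan_file() parses a single attribute file from a PMU's sysfs directory. A hypothetical wrapper around the loop from auxtrace.c:2157-2164:

#include "pmu.h"	/* tools/perf util header providing perf_pmu__scan() */

static struct perf_pmu *my_find_pmu_by_type(__u32 type)
{
	struct perf_pmu *pmu = NULL;

	/* perf_pmu__scan(NULL) starts the walk; each call yields the next. */
	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (pmu->type == type)
			break;
	}
	return pmu;	/* NULL when no PMU of that type is present */
}

/* Reading one sysfs attribute, as auxtrace.c:2175 does:        */
/*	int nr = 0;                                             */
/*	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr); */
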
pmu               728 tools/perf/util/header.c 	struct perf_pmu *pmu = NULL;
pmu               736 tools/perf/util/header.c 	while ((pmu = perf_pmu__scan(pmu))) {
pmu               737 tools/perf/util/header.c 		if (!pmu->name)
pmu               746 tools/perf/util/header.c 	while ((pmu = perf_pmu__scan(pmu))) {
pmu               747 tools/perf/util/header.c 		if (!pmu->name)
pmu               750 tools/perf/util/header.c 		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
pmu               754 tools/perf/util/header.c 		ret = do_write_string(ff, pmu->name);
pmu               814 tools/perf/util/header.c char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
pmu               166 tools/perf/util/header.h char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused);
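The util/header.c writer (its lines 728-754) is a classic two-pass feature dump: one perf_pmu__scan() loop counts the named PMUs, a second writes each PMU's type number and name. A sketch of the shape, with the count write and error handling filled in on assumption and the second parameter guessed from the file's other writers:

	static int write_pmu_mappings(struct feat_fd *ff,
				      struct evlist *evlist __maybe_unused)
	{
		struct perf_pmu *pmu = NULL;
		u32 pmu_num = 0;
		int ret;

		/* pass 1: count PMUs that have a name */
		while ((pmu = perf_pmu__scan(pmu)))
			if (pmu->name)
				pmu_num++;

		ret = do_write(ff, &pmu_num, sizeof(pmu_num));
		if (ret < 0)
			return ret;

		/* pass 2: pmu is NULL again here, so the scan restarts */
		while ((pmu = perf_pmu__scan(pmu))) {
			if (!pmu->name)
				continue;
			ret = do_write(ff, &pmu->type, sizeof(pmu->type));
			if (ret < 0)
				return ret;
			ret = do_write_string(ff, pmu->name);
			if (ret < 0)
				return ret;
		}
		return 0;
	}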
pmu                45 tools/perf/util/intel-pt.h struct perf_event_attr *intel_pt_pmu_default_config(struct perf_pmu *pmu);
pmu               322 tools/perf/util/parse-events.c 	    char *name, struct perf_pmu *pmu,
pmu               327 tools/perf/util/parse-events.c 	struct perf_cpu_map *cpus = pmu ? pmu->cpus :
pmu               339 tools/perf/util/parse-events.c 	evsel->core.system_wide = pmu ? pmu->is_uncore : false;
pmu              1318 tools/perf/util/parse-events.c 	struct perf_pmu *pmu;
pmu              1324 tools/perf/util/parse-events.c 	pmu = perf_pmu__find(name);
pmu              1325 tools/perf/util/parse-events.c 	if (!pmu) {
pmu              1333 tools/perf/util/parse-events.c 	if (pmu->default_config) {
pmu              1334 tools/perf/util/parse-events.c 		memcpy(&attr, pmu->default_config,
pmu              1340 tools/perf/util/parse-events.c 	use_uncore_alias = (pmu->is_uncore && use_alias);
pmu              1343 tools/perf/util/parse-events.c 		attr.type = pmu->type;
pmu              1344 tools/perf/util/parse-events.c 		evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL,
pmu              1355 tools/perf/util/parse-events.c 	if (perf_pmu__check_alias(pmu, head_config, &info))
pmu              1368 tools/perf/util/parse-events.c 	if (perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
pmu              1379 tools/perf/util/parse-events.c 			    get_config_name(head_config), pmu,
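Lines 1318-1379 of parse-events.c trace the life of parse_events_add_pmu(): look the PMU up, seed the attr from default_config when the PMU has one, fold alias terms in, then configure the attr from the user's terms. Condensed into a sketch, with error paths trimmed and the final __add_event() calls reduced to comments since their full argument lists are not shown above:

	struct perf_event_attr attr;
	struct perf_pmu_info info;
	struct perf_pmu *pmu = perf_pmu__find(name);

	if (!pmu)
		return -EINVAL;

	if (pmu->default_config)
		memcpy(&attr, pmu->default_config, sizeof(attr));
	else
		memset(&attr, 0, sizeof(attr));

	if (!head_config) {
		/* bare "pmu//": take the defaults as-is */
		attr.type = pmu->type;
		/* ... __add_event(list, &parse_state->idx, &attr, NULL, pmu, ...) */
	} else {
		if (perf_pmu__check_alias(pmu, head_config, &info))
			return -EINVAL;
		if (perf_pmu__config(pmu, &attr, head_config, parse_state->error))
			return -EINVAL;
		/* ... __add_event(..., get_config_name(head_config), pmu, ...) */
	}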
pmu              1402 tools/perf/util/parse-events.c 	struct perf_pmu *pmu = NULL;
pmu              1411 tools/perf/util/parse-events.c 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
pmu              1414 tools/perf/util/parse-events.c 		list_for_each_entry(alias, &pmu->aliases, list) {
pmu              1426 tools/perf/util/parse-events.c 							  pmu->name, head,
pmu              1429 tools/perf/util/parse-events.c 						 pmu->name, alias->str);
pmu              1807 tools/perf/util/parse-events.c 	struct perf_pmu *pmu = NULL;
pmu              1811 tools/perf/util/parse-events.c 	pmu = NULL;
pmu              1812 tools/perf/util/parse-events.c 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
pmu              1813 tools/perf/util/parse-events.c 		list_for_each_entry(alias, &pmu->aliases, list) {
pmu              1830 tools/perf/util/parse-events.c 	pmu = NULL;
pmu              1831 tools/perf/util/parse-events.c 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
pmu              1832 tools/perf/util/parse-events.c 		list_for_each_entry(alias, &pmu->aliases, list) {
pmu              2088 tools/perf/util/parse-events.c 	struct perf_pmu *pmu = NULL;
pmu              2106 tools/perf/util/parse-events.c 	while ((pmu = perf_pmu__scan(pmu)) != NULL)
pmu              2107 tools/perf/util/parse-events.c 		if (pmu->type == evsel->core.attr.type) {
pmu              2113 tools/perf/util/parse-events.c 		perf_pmu__scan_file(pmu, "nr_addr_filters",
pmu               243 tools/perf/util/parse-events.y 		struct perf_pmu *pmu = NULL;
pmu               250 tools/perf/util/parse-events.y 		while ((pmu = perf_pmu__scan(pmu)) != NULL) {
pmu               251 tools/perf/util/parse-events.y 			char *name = pmu->name;
pmu               261 tools/perf/util/parse-events.y 				if (!parse_events_add_pmu(_parse_state, list, pmu->name, terms, true, false))
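The parse-events.y fragment is the wildcard fallback for event specs like "uncore_cbox_*//": when the literal name is not a PMU, every registered PMU is matched against the user's glob and each hit is added with auto-merge enabled so the copies aggregate as one event. As I reconstruct the loop (the fnmatch() call and the "uncore_" prefix skipping are assumed from context, not quoted):

	struct perf_pmu *pmu = NULL;
	int ok = 0;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		char *name = pmu->name;

		/* assumed: let a pattern like "cbox_*" match "uncore_cbox_0" */
		if (!strncmp(name, "uncore_", 7) &&
		    strncmp(pattern, "uncore_", 7))
			name += 7;
		if (!fnmatch(pattern, name, 0) &&
		    !parse_events_add_pmu(_parse_state, list, pmu->name,
					  terms, /*auto_merge=*/true, false))
			ok++;
	}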
pmu               659 tools/perf/util/pmu.c static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
pmu               668 tools/perf/util/pmu.c 		cpuid = get_cpuid_str(pmu);
pmu               679 tools/perf/util/pmu.c struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
pmu               682 tools/perf/util/pmu.c 	char *cpuid = perf_pmu__getcpuid(pmu);
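perf_pmu__find_map() resolves the running machine to one of the tables jevents generated: fetch the cpuid string (an environment override or the arch get_cpuid_str() above), then walk the pmu_events_map[] array until an entry's cpuid pattern matches. A sketch consistent with the two fragments; the sentinel handling and the matcher name are assumed:

	struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
	{
		struct pmu_events_map *map;
		char *cpuid = perf_pmu__getcpuid(pmu);
		int i = 0;

		if (!cpuid)
			return NULL;

		for (;;) {
			map = &pmu_events_map[i++];
			if (!map->table) {	/* assumed sentinel: table == NULL */
				map = NULL;
				break;
			}
			if (!strcmp_cpuid_str(map->cpuid, cpuid))
				break;
		}
		free(cpuid);
		return map;
	}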
pmu               751 tools/perf/util/pmu.c static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
pmu               755 tools/perf/util/pmu.c 	const char *name = pmu->name;
pmu               757 tools/perf/util/pmu.c 	map = perf_pmu__find_map(pmu);
pmu               768 tools/perf/util/pmu.c 		const char *pname = pe->pmu ? pe->pmu : cpu_name;
pmu               795 tools/perf/util/pmu.c perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
pmu               815 tools/perf/util/pmu.c 	struct perf_pmu *pmu;
pmu               837 tools/perf/util/pmu.c 	pmu = zalloc(sizeof(*pmu));
pmu               838 tools/perf/util/pmu.c 	if (!pmu)
pmu               841 tools/perf/util/pmu.c 	pmu->cpus = pmu_cpumask(name);
pmu               842 tools/perf/util/pmu.c 	pmu->name = strdup(name);
pmu               843 tools/perf/util/pmu.c 	pmu->type = type;
pmu               844 tools/perf/util/pmu.c 	pmu->is_uncore = pmu_is_uncore(name);
pmu               845 tools/perf/util/pmu.c 	pmu->max_precise = pmu_max_precise(name);
pmu               846 tools/perf/util/pmu.c 	pmu_add_cpu_aliases(&aliases, pmu);
pmu               848 tools/perf/util/pmu.c 	INIT_LIST_HEAD(&pmu->format);
pmu               849 tools/perf/util/pmu.c 	INIT_LIST_HEAD(&pmu->aliases);
pmu               850 tools/perf/util/pmu.c 	list_splice(&format, &pmu->format);
pmu               851 tools/perf/util/pmu.c 	list_splice(&aliases, &pmu->aliases);
pmu               852 tools/perf/util/pmu.c 	list_add_tail(&pmu->list, &pmus);
pmu               854 tools/perf/util/pmu.c 	pmu->default_config = perf_pmu__get_default_config(pmu);
pmu               856 tools/perf/util/pmu.c 	return pmu;
pmu               861 tools/perf/util/pmu.c 	struct perf_pmu *pmu;
pmu               863 tools/perf/util/pmu.c 	list_for_each_entry(pmu, &pmus, list)
pmu               864 tools/perf/util/pmu.c 		if (!strcmp(pmu->name, name))
pmu               865 tools/perf/util/pmu.c 			return pmu;
pmu               870 tools/perf/util/pmu.c struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu)
pmu               876 tools/perf/util/pmu.c 	if (!pmu) {
pmu               878 tools/perf/util/pmu.c 		pmu = list_prepare_entry(pmu, &pmus, list);
pmu               880 tools/perf/util/pmu.c 	list_for_each_entry_continue(pmu, &pmus, list)
pmu               881 tools/perf/util/pmu.c 		return pmu;
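Lines 870-881 are the iterator all of those while-loops lean on: NULL seeds a fresh walk of the static pmus list, list_for_each_entry_continue() plus an immediate return yields exactly one entry per call, and falling off the end returns NULL. Assembled into a sketch (the sysfs priming call on first use is assumed, since the fragments skip the function's interior lines):

	struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu)
	{
		if (!pmu) {
			pmu_read_sysfs();	/* assumed: populate pmus once */
			pmu = list_prepare_entry(pmu, &pmus, list);
		}
		list_for_each_entry_continue(pmu, &pmus, list)
			return pmu;	/* yields the next entry, one per call */
		return NULL;
	}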
pmu               887 tools/perf/util/pmu.c 	struct perf_pmu *pmu;
pmu               894 tools/perf/util/pmu.c 	pmu = pmu_find(name);
pmu               895 tools/perf/util/pmu.c 	if (pmu)
pmu               896 tools/perf/util/pmu.c 		return pmu;
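perf_pmu__find() is the lazy-load front end: a hit in the already-parsed list returns directly, otherwise the PMU is read out of sysfs. The closing fallthrough is completed from context:

	struct perf_pmu *perf_pmu__find(const char *name)
	{
		struct perf_pmu *pmu;

		pmu = pmu_find(name);		/* already cached on the list? */
		if (pmu)
			return pmu;

		return pmu_lookup(name);	/* assumed: parse sysfs and cache */
	}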
pmu              1143 tools/perf/util/pmu.c int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
pmu              1147 tools/perf/util/pmu.c 	bool zero = !!pmu->default_config;
pmu              1149 tools/perf/util/pmu.c 	attr->type = pmu->type;
pmu              1150 tools/perf/util/pmu.c 	return perf_pmu__config_terms(&pmu->format, attr, head_terms,
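Lines 1143-1150 are effectively all of perf_pmu__config(): stamp the attr with the PMU's dynamic type, then hand the user's terms to the format parser. The zero flag records whether a default_config pre-seeded the attr so that, as I read it, config words touched by user terms are cleared first rather than OR-ed over the defaults:

	int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
			     struct list_head *head_terms,
			     struct parse_events_error *error)
	{
		bool zero = !!pmu->default_config;

		attr->type = pmu->type;	/* dynamic type from sysfs */
		return perf_pmu__config_terms(&pmu->format, attr, head_terms,
					      zero, error);
	}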
pmu              1154 tools/perf/util/pmu.c static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
pmu              1166 tools/perf/util/pmu.c 		if (pmu_find_format(&pmu->format, term->config))
pmu              1177 tools/perf/util/pmu.c 	list_for_each_entry(alias, &pmu->aliases, list) {
pmu              1214 tools/perf/util/pmu.c int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
pmu              1234 tools/perf/util/pmu.c 		alias = pmu_find_alias(pmu, term);
pmu              1304 tools/perf/util/pmu.c static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
pmu              1308 tools/perf/util/pmu.c 	int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);
pmu              1330 tools/perf/util/pmu.c static char *format_alias_or(char *buf, int len, struct perf_pmu *pmu,
pmu              1333 tools/perf/util/pmu.c 	snprintf(buf, len, "%s OR %s/%s/", alias->name, pmu->name, alias->name);
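format_alias() and format_alias_or() produce the two spellings perf list prints for an aliased event: the plain "pmu/alias" form and the "alias OR pmu/alias/" form used for core-PMU events. For a cpu alias the second form renders along these lines (example output, not taken from the source):

	mem-loads OR cpu/mem-loads/        [Kernel PMU event]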
pmu              1342 tools/perf/util/pmu.c 	char *pmu;
pmu              1388 tools/perf/util/pmu.c 	struct perf_pmu *pmu;
pmu              1398 tools/perf/util/pmu.c 	pmu = NULL;
pmu              1400 tools/perf/util/pmu.c 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
pmu              1401 tools/perf/util/pmu.c 		list_for_each_entry(alias, &pmu->aliases, list)
pmu              1403 tools/perf/util/pmu.c 		if (pmu->selectable)
pmu              1409 tools/perf/util/pmu.c 	pmu = NULL;
pmu              1411 tools/perf/util/pmu.c 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
pmu              1412 tools/perf/util/pmu.c 		list_for_each_entry(alias, &pmu->aliases, list) {
pmu              1414 tools/perf/util/pmu.c 				format_alias(buf, sizeof(buf), pmu, alias);
pmu              1415 tools/perf/util/pmu.c 			bool is_cpu = !strcmp(pmu->name, "cpu");
pmu              1426 tools/perf/util/pmu.c 				name = format_alias_or(buf, sizeof(buf), pmu, alias);
pmu              1432 tools/perf/util/pmu.c 								  pmu, alias);
pmu              1441 tools/perf/util/pmu.c 			aliases[j].pmu = pmu->name;
pmu              1446 tools/perf/util/pmu.c 		if (pmu->selectable &&
pmu              1447 tools/perf/util/pmu.c 		    (event_glob == NULL || strglobmatch(pmu->name, event_glob))) {
pmu              1449 tools/perf/util/pmu.c 			if (asprintf(&s, "%s//", pmu->name) < 0)
pmu              1479 tools/perf/util/pmu.c 				printf("%*s%s/%s/ ", 8, "", aliases[j].pmu, aliases[j].str);
pmu              1506 tools/perf/util/pmu.c 	struct perf_pmu *pmu;
pmu              1509 tools/perf/util/pmu.c 	pmu = NULL;
pmu              1510 tools/perf/util/pmu.c 	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
pmu              1511 tools/perf/util/pmu.c 		if (strcmp(pname, pmu->name))
pmu              1513 tools/perf/util/pmu.c 		list_for_each_entry(alias, &pmu->aliases, list)
pmu              1520 tools/perf/util/pmu.c static FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
pmu              1531 tools/perf/util/pmu.c 		 "%s" EVENT_SOURCE_DEVICE_PATH "%s/%s", sysfs, pmu->name, name);
pmu              1539 tools/perf/util/pmu.c int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
pmu              1547 tools/perf/util/pmu.c 	file = perf_pmu__open_file(pmu, name);
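perf_pmu__open_file() builds the sysfs path from EVENT_SOURCE_DEVICE_PATH and fopen()s it; perf_pmu__scan_file() layers a scanf-style parse on top, so callers check the return value for the number of converted fields. A usage sketch, assuming EVENT_SOURCE_DEVICE_PATH expands to "/bus/event_source/devices/":

	struct perf_pmu *pmu = perf_pmu__find("cpu");
	__u32 type;

	/* parses /sys/bus/event_source/devices/cpu/type */
	if (pmu && perf_pmu__scan_file(pmu, "type", "%u", &type) == 1)
		printf("cpu PMU registered as type %u\n", type);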
pmu                65 tools/perf/util/pmu.h int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
pmu                73 tools/perf/util/pmu.h int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
pmu                75 tools/perf/util/pmu.h struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
pmu                85 tools/perf/util/pmu.h struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
pmu                91 tools/perf/util/pmu.h int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...) __scanf(3, 4);
pmu                95 tools/perf/util/pmu.h struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
pmu                97 tools/perf/util/pmu.h struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
pmu               163 tools/perf/util/s390-sample-raw.c 	struct perf_pmu pmu;
pmu               166 tools/perf/util/s390-sample-raw.c 	memset(&pmu, 0, sizeof(pmu));
pmu               167 tools/perf/util/s390-sample-raw.c 	map = perf_pmu__find_map(&pmu);
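s390-sample-raw.c pulls a small trick with that zeroed on-stack perf_pmu: perf_pmu__find_map() only uses the PMU to ask perf_pmu__getcpuid() for a cpuid string, and on s390 get_cpuid_str() ignores its argument (line 140 above), so an empty struct is enough to fetch the host's event table:

	struct perf_pmu pmu;
	struct pmu_events_map *map;

	memset(&pmu, 0, sizeof(pmu));
	map = perf_pmu__find_map(&pmu);	/* cpuid comes from the arch hook */
	if (!map)
		return;	/* assumed: no table for this machine, nothing to decode */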
pmu                34 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu;
pmu                38 virt/kvm/arm/pmu.c 	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
pmu                39 virt/kvm/arm/pmu.c 	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
pmu                51 virt/kvm/arm/pmu.c 	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
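The kvm side opens with a container_of() chain: given only a kvm_pmc, step back to pmc[0] (the idx field makes that a pointer subtraction), recover the enclosing kvm_pmu, then the kvm_vcpu_arch, and finally the vcpu. Assembled from lines 34-39, with the pointer rewind and the last hop completed on assumption:

	static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
	{
		struct kvm_pmu *pmu;
		struct kvm_vcpu_arch *vcpu_arch;

		pmc -= pmc->idx;	/* back to &pmu->pmc[0] */
		pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
		vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
		return container_of(vcpu_arch, struct kvm_vcpu, arch);
	}

Line 51's test_bit(pmc->idx >> 1, ...) shows the chaining bookkeeping: counters chain in even/odd pairs, so one bit per pair (idx / 2) in the chained bitmap covers both halves.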
pmu               142 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               143 virt/kvm/arm/pmu.c 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
pmu               228 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               231 virt/kvm/arm/pmu.c 		pmu->pmc[i].idx = i;
pmu               242 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               245 virt/kvm/arm/pmu.c 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
pmu               247 virt/kvm/arm/pmu.c 	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
pmu               258 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               261 virt/kvm/arm/pmu.c 		kvm_pmu_release_perf_event(&pmu->pmc[i]);
pmu               285 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               295 virt/kvm/arm/pmu.c 		pmc = &pmu->pmc[i];
pmu               326 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               336 virt/kvm/arm/pmu.c 		pmc = &pmu->pmc[i];
pmu               370 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               377 virt/kvm/arm/pmu.c 	if (pmu->irq_level == overflow)
pmu               380 virt/kvm/arm/pmu.c 	pmu->irq_level = overflow;
pmu               384 virt/kvm/arm/pmu.c 					      pmu->irq_num, overflow, pmu);
pmu               391 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               398 virt/kvm/arm/pmu.c 	return pmu->irq_level != run_level;
pmu               410 virt/kvm/arm/pmu.c 	if (vcpu->arch.pmu.irq_level)
pmu               446 virt/kvm/arm/pmu.c 	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
pmu               451 virt/kvm/arm/pmu.c 	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
pmu               473 virt/kvm/arm/pmu.c 	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
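Lines 446-473 belong to the host-side overflow callback: PERF_EF_UPDATE on stop syncs the hardware count into the event before the guest's virtual counter and overflow status are updated, and PERF_EF_RELOAD on start re-arms the recomputed sample period. In outline, with the guest bookkeeping condensed to a comment and the handler-context recovery assumed:

	static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
					  struct perf_sample_data *data,
					  struct pt_regs *regs)
	{
		/* assumed: pmc was passed as the overflow handler context */
		struct kvm_pmc *pmc = perf_event->overflow_handler_context;
		struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);

		cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

		/* ... set the guest overflow bit for pmc->idx, recompute the
		 * period left until the next guest-visible overflow, kick the
		 * vcpu if the interrupt should fire ... */

		cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
	}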
pmu               483 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               512 virt/kvm/arm/pmu.c 		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
pmu               566 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               577 virt/kvm/arm/pmu.c 	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
pmu               648 virt/kvm/arm/pmu.c 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
pmu               649 virt/kvm/arm/pmu.c 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
pmu               659 virt/kvm/arm/pmu.c 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
pmu               661 virt/kvm/arm/pmu.c 		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
pmu               701 virt/kvm/arm/pmu.c 	if (!vcpu->arch.pmu.created)
pmu               710 virt/kvm/arm/pmu.c 		int irq = vcpu->arch.pmu.irq_num;
pmu               727 virt/kvm/arm/pmu.c 	vcpu->arch.pmu.ready = true;
pmu               740 virt/kvm/arm/pmu.c 	if (vcpu->arch.pmu.created)
pmu               757 virt/kvm/arm/pmu.c 		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
pmu               758 virt/kvm/arm/pmu.c 					 &vcpu->arch.pmu);
pmu               763 virt/kvm/arm/pmu.c 	vcpu->arch.pmu.created = true;
pmu               782 virt/kvm/arm/pmu.c 			if (vcpu->arch.pmu.irq_num != irq)
pmu               785 virt/kvm/arm/pmu.c 			if (vcpu->arch.pmu.irq_num == irq)
pmu               820 virt/kvm/arm/pmu.c 		vcpu->arch.pmu.irq_num = irq;
pmu               846 virt/kvm/arm/pmu.c 		irq = vcpu->arch.pmu.irq_num;