References to pmc in the KVM x86 PMU code (arch/x86/kvm/pmu.c in the Linux kernel), grouped by function. Lines that did not match the search are elided and marked with "...".
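kvm_perf_overflow() is the overflow callback handed to host perf for a guest counter. It latches the overflow into reprogram_pmi and global_status, then asks the vCPU to process it: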
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!test_and_set_bit(pmc->idx,
                              (unsigned long *)&pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
        }
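kvm_perf_overflow_intr() does the same bookkeeping but also injects the PMI: directly via KVM_REQ_PMI when the vCPU is in guest mode, and via irq_work otherwise, since the wakeup then required cannot be issued from NMI context: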
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!test_and_set_bit(pmc->idx,
                              (unsigned long *)&pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

                if (!kvm_is_in_guest())
                        irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
                else
                        kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
        }
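pmc_reprogram_counter() backs the guest counter with a host perf event via perf_event_create_kernel_counter(); the sample period is chosen so that the host event overflows exactly when the guest counter would wrap: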
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, ...)
        ...
        attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
        ...
        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
        ...
        pmc->perf_event = event;
        clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
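reprogram_gp_counter() handles a write to a general-purpose event-select MSR: it stops the old backing event, bails out while the counter is disabled, translates the event-select/unit-mask pair into a perf config via find_arch_event(), and recreates the counter: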
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        ...
        pmc->eventsel = eventsel;
        pmc_stop_counter(pmc);

        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
                return;
        ...
        config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
                                                       event_select, unit_mask);
        ...
        pmc_reprogram_counter(pmc, type, config, ...);
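reprogram_fixed_counter() is the fixed-counter analogue, keyed off the enable field of the fixed-counter control MSR: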
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
        ...
        pmc_stop_counter(pmc);

        if (!en_field || !pmc_is_enabled(pmc))
                return;

        pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE, ...);
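reprogram_counter() maps a counter index back to its kvm_pmc and dispatches to the GP or fixed variant: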
        struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

        if (!pmc)
                return;
        if (pmc_is_gp(pmc))
                reprogram_gp_counter(pmc, pmc->eventsel);
        else {
                ...
                reprogram_fixed_counter(pmc, ctrl, idx);
        }
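kvm_pmu_handle_event() services the KVM_REQ_PMU request raised by the overflow handlers: each counter flagged in reprogram_pmi is looked up and reprogrammed, and entries that no longer have a backing perf event are skipped: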
        struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

        if (unlikely(!pmc || !pmc->perf_event)) { ... }
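kvm_pmu_rdpmc() backs guest RDPMC emulation: the guest-supplied index is mapped to a pmc, a nonzero return flags an invalid index to the caller, and otherwise the current counter value is read out: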
        struct kvm_pmc *pmc;
        ...
        pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
        if (!pmc)
                return 1;

        ctr_val = pmc_read_counter(pmc);
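For context, here is a minimal, hypothetical sketch of the host perf API that pmc_reprogram_counter() drives. The handler name, event choice, and period are illustrative assumptions, not code from the file above; only perf_event_create_kernel_counter() and the handler signature match what the excerpts use:

        #include <linux/perf_event.h>

        /* Hypothetical overflow callback; same signature as KVM's handlers. */
        static void demo_overflow(struct perf_event *event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
        {
                /* Runs in the host whenever the hardware counter overflows. */
        }

        static struct perf_event *demo_create_counter(void)
        {
                struct perf_event_attr attr = {
                        .type          = PERF_TYPE_HARDWARE,
                        .size          = sizeof(attr),
                        .config        = PERF_COUNT_HW_INSTRUCTIONS,
                        .sample_period = 1000000, /* fire every 1M instructions */
                        .pinned        = true,
                };

                /* cpu == -1, task == current: count this task on any CPU. */
                return perf_event_create_kernel_counter(&attr, -1, current,
                                                        demo_overflow, NULL);
        }

The final NULL is the overflow handler context; KVM passes the kvm_pmc there and fetches it back through perf_event->overflow_handler_context in the callbacks above.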