Lines matching refs: pmc
43 static bool pmc_is_gp(struct kvm_pmc *pmc) in pmc_is_gp() argument
45 return pmc->type == KVM_PMC_GP; in pmc_is_gp()
48 static inline u64 pmc_bitmask(struct kvm_pmc *pmc) in pmc_bitmask() argument
50 struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; in pmc_bitmask()
52 return pmu->counter_bitmask[pmc->type]; in pmc_bitmask()
55 static inline bool pmc_enabled(struct kvm_pmc *pmc) in pmc_enabled() argument
57 struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; in pmc_enabled()
58 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); in pmc_enabled()
110 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow() local
111 struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; in kvm_perf_overflow()
112 if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) { in kvm_perf_overflow()
113 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in kvm_perf_overflow()
114 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
121 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow_intr() local
122 struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu; in kvm_perf_overflow_intr()
123 if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) { in kvm_perf_overflow_intr()
124 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in kvm_perf_overflow_intr()
125 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow_intr()
135 irq_work_queue(&pmc->vcpu->arch.pmu.irq_work); in kvm_perf_overflow_intr()
137 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in kvm_perf_overflow_intr()
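Both overflow callbacks mark the counter for reprogramming and latch its bit in the emulated IA32_PERF_GLOBAL_STATUS; the _intr variant additionally has to deliver a PMI to the guest. A sketch of the interrupting variant, assuming the usual perf overflow-handler prototype and a kvm_is_in_guest() check deciding between irq_work and a direct KVM_REQ_PMI (the exact condition between lines 125 and 135 is not visible in the hits):

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

        if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
                /* Latch the overflow in the emulated IA32_PERF_GLOBAL_STATUS. */
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

                /*
                 * Assumption: outside guest mode the vcpu cannot be kicked
                 * safely from NMI context, so the wakeup is deferred to
                 * irq_work; in guest mode a PMI request is enough.
                 */
                if (!kvm_is_in_guest())
                        irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
                else
                        kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
        }
}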
141 static u64 read_pmc(struct kvm_pmc *pmc) in read_pmc() argument
145 counter = pmc->counter; in read_pmc()
147 if (pmc->perf_event) in read_pmc()
148 counter += perf_event_read_value(pmc->perf_event, in read_pmc()
153 return counter & pmc_bitmask(pmc); in read_pmc()
156 static void stop_counter(struct kvm_pmc *pmc) in stop_counter() argument
158 if (pmc->perf_event) { in stop_counter()
159 pmc->counter = read_pmc(pmc); in stop_counter()
160 perf_event_release_kernel(pmc->perf_event); in stop_counter()
161 pmc->perf_event = NULL; in stop_counter()
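read_pmc() returns the guest-visible counter value: the software accumulator plus whatever the backing perf_event has counted so far, masked to the counter's width. stop_counter() folds the live count back into pmc->counter before releasing the event. A sketch, with the enabled/running out-parameters of perf_event_read_value() assumed and ignored as in the hits:

static u64 read_pmc(struct kvm_pmc *pmc)
{
        u64 counter, enabled, running;

        counter = pmc->counter;

        /* Add whatever the host perf_event has accumulated so far. */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event,
                                                 &enabled, &running);

        /* Truncate to the architectural counter width. */
        return counter & pmc_bitmask(pmc);
}

static void stop_counter(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                /* Fold the live count into the software copy, then drop the event. */
                pmc->counter = read_pmc(pmc);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}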
165 static void reprogram_counter(struct kvm_pmc *pmc, u32 type, in reprogram_counter() argument
185 attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc); in reprogram_counter()
189 kvm_perf_overflow, pmc); in reprogram_counter()
196 pmc->perf_event = event; in reprogram_counter()
197 clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi); in reprogram_counter()
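reprogram_counter() is where a guest counter becomes a host perf_event. The sample period is chosen so that the host event overflows exactly when the guest counter would wrap, i.e. after (bitmask + 1 - counter) increments. A sketch of the core, with the parameter list inferred from the call sites and the perf_event_attr fields assumed:

static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
                              unsigned config, bool exclude_user,
                              bool exclude_kernel, bool intr)
{
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
                .size = sizeof(attr),
                .pinned = true,
                .exclude_idle = true,
                .exclude_host = 1,
                .exclude_user = exclude_user,
                .exclude_kernel = exclude_kernel,
                .config = config,
        };

        /* Overflow after exactly the number of events the guest still expects. */
        attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event))
                return; /* assumption: the real code also logs the failure */

        pmc->perf_event = event;
        clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
}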
217 static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) in reprogram_gp_counter() argument
225 pmc->eventsel = eventsel; in reprogram_gp_counter()
227 stop_counter(pmc); in reprogram_gp_counter()
229 if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc)) in reprogram_gp_counter()
240 config = find_arch_event(&pmc->vcpu->arch.pmu, event_select, in reprogram_gp_counter()
249 reprogram_counter(pmc, type, config, in reprogram_gp_counter()
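reprogram_gp_counter() decodes a write to a general-purpose event-select MSR: it caches the value, tears down any existing perf_event, bails out if the counter is disabled, then maps the event_select/unit_mask pair to a generic perf event via find_arch_event(), falling back to a raw event otherwise. A sketch under those assumptions (the raw-event fallback and the argument list around line 249 are inferred, not visible in the hits):

static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
        unsigned config, type = PERF_TYPE_RAW;
        u8 event_select, unit_mask;

        pmc->eventsel = eventsel;

        /* Release the old perf_event before deciding what to program next. */
        stop_counter(pmc);

        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
                return;

        event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

        /* Prefer a generic hardware event if the architectural table has one. */
        config = find_arch_event(&pmc->vcpu->arch.pmu, event_select, unit_mask);
        if (config != PERF_COUNT_HW_MAX)
                type = PERF_TYPE_HARDWARE;
        else
                config = eventsel & X86_RAW_EVENT_MASK; /* assumed fallback */

        reprogram_counter(pmc, type, config,
                          !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                          !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                          eventsel & ARCH_PERFMON_EVENTSEL_INT);
}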
257 static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx) in reprogram_fixed_counter() argument
262 stop_counter(pmc); in reprogram_fixed_counter()
264 if (!en || !pmc_enabled(pmc)) in reprogram_fixed_counter()
267 reprogram_counter(pmc, PERF_TYPE_HARDWARE, in reprogram_fixed_counter()
285 struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i); in reprogram_fixed_counters() local
290 reprogram_fixed_counter(pmc, en_pmi, i); in reprogram_fixed_counters()
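Fixed counters are controlled by a per-counter enable/PMI nibble in IA32_FIXED_CTR_CTRL rather than a full event select: bits 0-1 choose OS/user counting and bit 3 requests a PMI. reprogram_fixed_counters() walks every fixed counter and reprograms the ones whose nibble changed. A sketch of the per-counter half, assuming an arch_events[]/fixed_pmc_events[] table supplies the generic event for each fixed counter:

static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
{
        unsigned en = en_pmi & 0x3;     /* bit 0: count OS, bit 1: count user */
        bool pmi = en_pmi & 0x8;        /* bit 3: raise a PMI on overflow */

        stop_counter(pmc);

        if (!en || !pmc_enabled(pmc))
                return;

        reprogram_counter(pmc, PERF_TYPE_HARDWARE,
                          arch_events[fixed_pmc_events[idx]].event_type,
                          !(en & 0x2),  /* exclude user */
                          !(en & 0x1),  /* exclude kernel */
                          pmi);
}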
298 struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx); in reprogram_idx() local
300 if (!pmc) in reprogram_idx()
303 if (pmc_is_gp(pmc)) in reprogram_idx()
304 reprogram_gp_counter(pmc, pmc->eventsel); in reprogram_idx()
307 reprogram_fixed_counter(pmc, in reprogram_idx()
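reprogram_idx() is the common re-entry point used after an overflow: given a global counter index it looks up the kvm_pmc and replays the appropriate reprogramming path with the last cached control value. A sketch, where fixed_en_pmi() (extracting a fixed counter's nibble from fixed_ctr_ctrl) and INTEL_PMC_IDX_FIXED as the base of the fixed range are assumptions:

static void reprogram_idx(struct kvm_pmu *pmu, int idx)
{
        struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);

        if (!pmc)
                return;

        if (pmc_is_gp(pmc))
                reprogram_gp_counter(pmc, pmc->eventsel);
        else {
                /* Fixed counters sit above the GP range in the global index space. */
                int fidx = idx - INTEL_PMC_IDX_FIXED;

                reprogram_fixed_counter(pmc,
                                fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
        }
}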
347 struct kvm_pmc *pmc; in kvm_pmu_get_msr() local
363 if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) || in kvm_pmu_get_msr()
364 (pmc = get_fixed_pmc(pmu, index))) { in kvm_pmu_get_msr()
365 *data = read_pmc(pmc); in kvm_pmu_get_msr()
367 } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) { in kvm_pmu_get_msr()
368 *data = pmc->eventsel; in kvm_pmu_get_msr()
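The MSR read path resolves an MSR index to a kvm_pmc in two steps: a counter MSR (MSR_IA32_PERFCTR0+n or a fixed counter) returns the live value via read_pmc(), while an event-select MSR (MSR_P6_EVNTSEL0+n) returns the cached eventsel. A sketch assuming the (vcpu, index, data) signature, that get_gp_pmc()/get_fixed_pmc() return NULL for out-of-range indices, and that the global-control MSR cases preceding this fallthrough are handled elsewhere:

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        /* Cases for the global PMU MSRs (GLOBAL_CTRL, GLOBAL_STATUS, ...) omitted. */
        if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
            (pmc = get_fixed_pmc(pmu, index))) {
                /* Counter MSR: report the current (masked) count. */
                *data = read_pmc(pmc);
                return 0;
        } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
                /* Event-select MSR: report the last value written. */
                *data = pmc->eventsel;
                return 0;
        }

        return 1;
}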
378 struct kvm_pmc *pmc; in kvm_pmu_set_msr() local
414 if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) || in kvm_pmu_set_msr()
415 (pmc = get_fixed_pmc(pmu, index))) { in kvm_pmu_set_msr()
418 pmc->counter += data - read_pmc(pmc); in kvm_pmu_set_msr()
420 } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) { in kvm_pmu_set_msr()
421 if (data == pmc->eventsel) in kvm_pmu_set_msr()
424 reprogram_gp_counter(pmc, data); in kvm_pmu_set_msr()
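Writes take the mirror-image path: a write to a counter MSR biases pmc->counter so that a subsequent read_pmc() returns exactly the written value (the running perf_event is left alone), while a write to an event-select MSR triggers a full reprogram unless nothing changed. A sketch of the tail of kvm_pmu_set_msr(), with index and data assumed to already hold the MSR number and the written value, and reserved-bit checking omitted:

        if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
            (pmc = get_fixed_pmc(pmu, index))) {
                /*
                 * Adjust the software accumulator so read_pmc() now yields
                 * 'data'; the backing perf_event keeps counting undisturbed.
                 */
                pmc->counter += data - read_pmc(pmc);
                return 0;
        } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
                if (data == pmc->eventsel)
                        return 0;       /* no change, keep the current perf_event */
                reprogram_gp_counter(pmc, data);
                return 0;
        }

        return 1;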
432 int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc) in kvm_pmu_check_pmc() argument
435 bool fixed = pmc & (1u << 30); in kvm_pmu_check_pmc()
436 pmc &= ~(3u << 30); in kvm_pmu_check_pmc()
437 return (!fixed && pmc >= pmu->nr_arch_gp_counters) || in kvm_pmu_check_pmc()
438 (fixed && pmc >= pmu->nr_arch_fixed_counters); in kvm_pmu_check_pmc()
441 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) in kvm_pmu_read_pmc() argument
444 bool fast_mode = pmc & (1u << 31); in kvm_pmu_read_pmc()
445 bool fixed = pmc & (1u << 30); in kvm_pmu_read_pmc()
449 pmc &= ~(3u << 30); in kvm_pmu_read_pmc()
450 if (!fixed && pmc >= pmu->nr_arch_gp_counters) in kvm_pmu_read_pmc()
452 if (fixed && pmc >= pmu->nr_arch_fixed_counters) in kvm_pmu_read_pmc()
455 ctr = read_pmc(&counters[pmc]); in kvm_pmu_read_pmc()
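Both RDPMC entry points decode ECX the same way: bit 30 selects the fixed counter bank, bit 31 requests a "fast" 32-bit read, and the low bits index into the selected array. kvm_pmu_check_pmc() only validates the index (non-zero for an out-of-range counter); kvm_pmu_read_pmc() also performs the read. A sketch of the read path, assuming pmu->gp_counters[]/fixed_counters[] arrays and that fast mode truncates the result to 32 bits:

int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool fast_mode = pmc & (1u << 31);      /* ECX bit 31: 32-bit read */
        bool fixed = pmc & (1u << 30);          /* ECX bit 30: fixed bank */
        struct kvm_pmc *counters;
        u64 ctr;

        pmc &= ~(3u << 30);
        if (!fixed && pmc >= pmu->nr_arch_gp_counters)
                return 1;
        if (fixed && pmc >= pmu->nr_arch_fixed_counters)
                return 1;

        counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
        ctr = read_pmc(&counters[pmc]);
        if (fast_mode)
                ctr = (u32)ctr;

        *data = ctr;
        return 0;
}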
541 struct kvm_pmc *pmc = &pmu->gp_counters[i]; in kvm_pmu_reset() local
542 stop_counter(pmc); in kvm_pmu_reset()
543 pmc->counter = pmc->eventsel = 0; in kvm_pmu_reset()
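Reset walks every general-purpose counter, releases its perf_event via stop_counter() and clears both the counter value and the cached eventsel; the fixed counters and global control MSRs are presumably cleared in lines that follow the visible hits. A sketch of the general-purpose loop, with INTEL_PMC_MAX_GENERIC assumed as the array bound:

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                /* Drop the backing perf_event and forget all guest state. */
                stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }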
567 struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit); in kvm_handle_pmu_event() local
569 if (unlikely(!pmc || !pmc->perf_event)) { in kvm_handle_pmu_event()
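Finally, kvm_handle_pmu_event() runs in vcpu context in response to KVM_REQ_PMU: it walks the reprogram_pmi bitmap populated by the overflow callbacks and re-arms each marked counter through reprogram_idx(); a stale bit whose counter no longer has a perf_event is simply cleared. A sketch of the loop, assuming iteration over a snapshot of the bitmap with X86_PMC_IDX_MAX as the bound:

void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        u64 bitmask = pmu->reprogram_pmi;
        int bit;

        for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
                struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

                if (unlikely(!pmc || !pmc->perf_event)) {
                        /* Nothing to re-arm; just drop the stale request bit. */
                        clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
                        continue;
                }

                reprogram_idx(pmu, bit);
        }
}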