Lines matching refs:pmu (references to the struct kvm_pmu pointer in KVM's Intel vPMU emulation)

38 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) in reprogram_fixed_counters() argument
42 for (i = 0; i < pmu->nr_arch_fixed_counters; i++) { in reprogram_fixed_counters()
44 u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i); in reprogram_fixed_counters()
47 pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); in reprogram_fixed_counters()
55 pmu->fixed_ctr_ctrl = data; in reprogram_fixed_counters()
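
IA32_FIXED_CTR_CTRL packs one 4-bit control field per fixed counter, and reprogram_fixed_counters() walks those fields, reprogramming only the counters whose field actually changed before latching the new MSR value. A minimal user-space sketch of that per-field diff; fixed_ctrl_field() is reimplemented here under the assumption of the architectural 4-bits-per-counter layout, and the MSR values are made up:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's fixed_ctrl_field(): each fixed counter
     * owns 4 bits of IA32_FIXED_CTR_CTRL (ring enables, AnyThread, PMI). */
    static uint8_t fixed_ctrl_field(uint64_t ctrl, int idx)
    {
        return (ctrl >> (idx * 4)) & 0xf;
    }

    int main(void)
    {
        uint64_t old_val = 0xb0a;   /* hypothetical current FIXED_CTR_CTRL */
        uint64_t new_val = 0xb03;   /* hypothetical guest write */
        int nr_fixed = 3;

        for (int i = 0; i < nr_fixed; i++) {
            uint8_t o = fixed_ctrl_field(old_val, i);
            uint8_t n = fixed_ctrl_field(new_val, i);
            if (o != n)  /* only counters whose field moved get reprogrammed */
                printf("fixed counter %d: ctrl %#x -> %#x\n", i, o, n);
        }
        return 0;
    }
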
59 static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) in global_ctrl_changed() argument
62 u64 diff = pmu->global_ctrl ^ data; in global_ctrl_changed()
64 pmu->global_ctrl = data; in global_ctrl_changed()
67 reprogram_counter(pmu, bit); in global_ctrl_changed()
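
global_ctrl_changed() relies on XOR: old ^ new has a bit set exactly where a counter's enable bit flipped, so only those counters are reprogrammed. GP counters occupy bits 0..n-1 and fixed counters start at bit 32, which is also why intel_pmc_is_enabled() below can simply test_bit() pmc->idx against global_ctrl. A sketch of the diff-and-walk, with a plain loop standing in for for_each_set_bit() and a printf standing in for KVM's reprogram_counter():

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for KVM's reprogram_counter(pmu, bit). */
    static void reprogram_counter(int bit)
    {
        printf("reprogram counter idx %d\n", bit);
    }

    int main(void)
    {
        uint64_t global_ctrl = 0x3;          /* GP0 and GP1 enabled */
        uint64_t data = (1ull << 32) | 0x1;  /* hypothetical write: keep GP0,
                                                disable GP1, enable FIXED0 */
        uint64_t diff = global_ctrl ^ data;  /* only the flipped bits */

        global_ctrl = data;
        for (int bit = 0; bit < 64; bit++)   /* models for_each_set_bit() */
            if (diff & (1ull << bit))
                reprogram_counter(bit);      /* prints idx 1 and idx 32 */
        return 0;
    }
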
70 static unsigned intel_find_arch_event(struct kvm_pmu *pmu, in intel_find_arch_event() argument
79 && (pmu->available_event_types & (1 << i))) in intel_find_arch_event()
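
intel_find_arch_event() scans a table of architectural events for a matching (event_select, unit_mask) pair, and only accepts an entry whose bit is set in available_event_types, the inverted CPUID 0xA EBX mask computed in intel_pmu_refresh() below, where a set bit means the event is supported. A sketch with a hypothetical two-entry table; real KVM maps the hit to a perf generic event, not a string:

    #include <stdint.h>
    #include <stdio.h>

    struct arch_event { uint8_t eventsel, unit_mask; const char *name; };

    /* Illustrative subset of the architectural event table. */
    static const struct arch_event events[] = {
        { 0x3c, 0x00, "cpu-cycles" },
        { 0xc0, 0x00, "instructions" },
    };

    static const char *find_arch_event(uint32_t available, uint8_t es, uint8_t um)
    {
        for (unsigned i = 0; i < sizeof(events) / sizeof(events[0]); i++)
            if (events[i].eventsel == es && events[i].unit_mask == um &&
                (available & (1u << i)))  /* bit set => event supported */
                return events[i].name;
        return "raw";  /* no table hit: fall back to a raw event */
    }

    int main(void)
    {
        printf("%s\n", find_arch_event(0x3, 0xc0, 0x00)); /* instructions */
        return 0;
    }
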
99 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in intel_pmc_is_enabled() local
101 return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl); in intel_pmc_is_enabled()
104 static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) in intel_pmc_idx_to_pmc() argument
107 return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx, in intel_pmc_idx_to_pmc()
112 return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0); in intel_pmc_idx_to_pmc()
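
intel_pmc_idx_to_pmc() splits the global counter index space at INTEL_PMC_IDX_FIXED (32): lower indices resolve relative to the first GP event-select MSR, higher ones relative to the first fixed-counter MSR. A sketch of the mapping; the MSR constants are the architectural values (0x186 and 0x309), not anything taken from this listing:

    #include <stdint.h>
    #include <stdio.h>

    #define INTEL_PMC_IDX_FIXED      32      /* fixed counters start here */
    #define MSR_P6_EVNTSEL0          0x186   /* first GP event-select MSR */
    #define MSR_CORE_PERF_FIXED_CTR0 0x309   /* first fixed-counter MSR */

    /* Model of the idx -> MSR mapping used to resolve a global counter
     * index (e.g. a GLOBAL_CTRL bit position) back to a counter. */
    static uint32_t pmc_idx_to_msr(int pmc_idx)
    {
        if (pmc_idx < INTEL_PMC_IDX_FIXED)
            return MSR_P6_EVNTSEL0 + pmc_idx;
        return MSR_CORE_PERF_FIXED_CTR0 + (pmc_idx - INTEL_PMC_IDX_FIXED);
    }

    int main(void)
    {
        printf("idx 1  -> MSR %#x\n", pmc_idx_to_msr(1));   /* 0x187 */
        printf("idx 33 -> MSR %#x\n", pmc_idx_to_msr(33));  /* 0x30a */
        return 0;
    }
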
119 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_msr_idx() local
124 return (!fixed && idx >= pmu->nr_arch_gp_counters) || in intel_is_valid_msr_idx()
125 (fixed && idx >= pmu->nr_arch_fixed_counters); in intel_is_valid_msr_idx()
131 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_msr_idx_to_pmc() local
136 if (!fixed && idx >= pmu->nr_arch_gp_counters) in intel_msr_idx_to_pmc()
138 if (fixed && idx >= pmu->nr_arch_fixed_counters) in intel_msr_idx_to_pmc()
140 counters = fixed ? pmu->fixed_counters : pmu->gp_counters; in intel_msr_idx_to_pmc()
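
Both RDPMC helpers decode the guest's ECX the same way: bit 30 selects the fixed-counter bank and the low bits give the counter number. Note that intel_is_valid_msr_idx(), despite its name, returns nonzero for an out-of-range index. The sketch below folds that decode into a boolean "in range" check under the same nr_gp/nr_fixed bookkeeping:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of RDPMC index decoding: ECX bit 30 picks the fixed bank. */
    static bool rdpmc_idx_ok(uint32_t idx, int nr_gp, int nr_fixed)
    {
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);  /* strip type bits, keep the counter number */
        return fixed ? idx < (uint32_t)nr_fixed : idx < (uint32_t)nr_gp;
    }

    int main(void)
    {
        /* hypothetical vPMU with 4 GP and 3 fixed counters */
        printf("%d\n", rdpmc_idx_ok(2, 4, 3));               /* 1: GP#2 */
        printf("%d\n", rdpmc_idx_ok((1u << 30) | 2, 4, 3));  /* 1: FIXED#2 */
        printf("%d\n", rdpmc_idx_ok((1u << 30) | 3, 4, 3));  /* 0: too big */
        return 0;
    }
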
147 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_is_valid_msr() local
155 ret = pmu->version > 1; in intel_is_valid_msr()
158 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) || in intel_is_valid_msr()
159 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) || in intel_is_valid_msr()
160 get_fixed_pmc(pmu, msr); in intel_is_valid_msr()
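
For version > 1 the global MSRs are always valid; otherwise intel_is_valid_msr() reduces to the range checks that get_gp_pmc()/get_fixed_pmc() perform: the MSR must fall within nr_arch_gp_counters of one of the two GP bases, or within nr_arch_fixed_counters of the fixed base. A sketch of that range test, with the architectural MSR bases written out as assumptions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSR_IA32_PERFCTR0        0x0c1
    #define MSR_P6_EVNTSEL0          0x186
    #define MSR_CORE_PERF_FIXED_CTR0 0x309

    static bool in_range(uint32_t msr, uint32_t base, int nr)
    {
        return msr >= base && msr < base + (uint32_t)nr;
    }

    /* Model of the counter-MSR part of intel_is_valid_msr(). */
    static bool counter_msr_ok(uint32_t msr, int nr_gp, int nr_fixed)
    {
        return in_range(msr, MSR_IA32_PERFCTR0, nr_gp) ||
               in_range(msr, MSR_P6_EVNTSEL0, nr_gp) ||
               in_range(msr, MSR_CORE_PERF_FIXED_CTR0, nr_fixed);
    }

    int main(void)
    {
        printf("%d\n", counter_msr_ok(0x0c1, 4, 3)); /* 1: PERFCTR0 */
        printf("%d\n", counter_msr_ok(0x30c, 4, 3)); /* 0: past FIXED_CTR2 */
        return 0;
    }
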
169 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_get_msr() local
174 *data = pmu->fixed_ctr_ctrl; in intel_pmu_get_msr()
177 *data = pmu->global_status; in intel_pmu_get_msr()
180 *data = pmu->global_ctrl; in intel_pmu_get_msr()
183 *data = pmu->global_ovf_ctrl; in intel_pmu_get_msr()
186 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_get_msr()
187 (pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_get_msr()
190 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_get_msr()
201 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_set_msr() local
208 if (pmu->fixed_ctr_ctrl == data) in intel_pmu_set_msr()
211 reprogram_fixed_counters(pmu, data); in intel_pmu_set_msr()
217 pmu->global_status = data; in intel_pmu_set_msr()
222 if (pmu->global_ctrl == data) in intel_pmu_set_msr()
224 if (!(data & pmu->global_ctrl_mask)) { in intel_pmu_set_msr()
225 global_ctrl_changed(pmu, data); in intel_pmu_set_msr()
230 if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) { in intel_pmu_set_msr()
232 pmu->global_status &= ~data; in intel_pmu_set_msr()
233 pmu->global_ovf_ctrl = data; in intel_pmu_set_msr()
238 if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || in intel_pmu_set_msr()
239 (pmc = get_fixed_pmc(pmu, msr))) { in intel_pmu_set_msr()
244 } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { in intel_pmu_set_msr()
247 if (!(data & pmu->reserved_bits)) { in intel_pmu_set_msr()
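
The write paths are gated by reserved-bit masks: global_ctrl_mask is the complement of the implemented-counter bits, so a GLOBAL_CTRL write is rejected if it sets anything outside them, while GLOBAL_OVF_CTRL additionally carves the two top status bits (OvfBuf and CondChgd, bits 62-63) back out of the mask via ~(3ull<<62) and clears the written bits from GLOBAL_STATUS. A sketch of both checks with a hypothetical 4-GP/3-fixed layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical vPMU: 4 GP counters (bits 0-3), 3 fixed (bits 32-34) */
        uint64_t global_ctrl      = 0xfull | (0x7ull << 32);
        uint64_t global_ctrl_mask = ~global_ctrl;   /* reserved bits */
        uint64_t global_status    = 1ull << 0;      /* GP0 overflowed */

        /* GLOBAL_CTRL write: reject if any reserved bit is set. */
        uint64_t wr = 1ull << 5;
        printf("GLOBAL_CTRL %#llx %s\n", (unsigned long long)wr,
               (wr & global_ctrl_mask) ? "rejected" : "accepted");

        /* GLOBAL_OVF_CTRL write: bits 62/63 are additionally writable,
         * and an accepted write clears the matching GLOBAL_STATUS bits. */
        uint64_t ovf = (1ull << 0) | (1ull << 63);
        if (!(ovf & (global_ctrl_mask & ~(3ull << 62)))) {
            global_status &= ~ovf;
            printf("GLOBAL_STATUS now %#llx\n",
                   (unsigned long long)global_status);
        }
        return 0;
    }
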
259 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_refresh() local
264 pmu->nr_arch_gp_counters = 0; in intel_pmu_refresh()
265 pmu->nr_arch_fixed_counters = 0; in intel_pmu_refresh()
266 pmu->counter_bitmask[KVM_PMC_GP] = 0; in intel_pmu_refresh()
267 pmu->counter_bitmask[KVM_PMC_FIXED] = 0; in intel_pmu_refresh()
268 pmu->version = 0; in intel_pmu_refresh()
269 pmu->reserved_bits = 0xffffffff00200000ull; in intel_pmu_refresh()
277 pmu->version = eax.split.version_id; in intel_pmu_refresh()
278 if (!pmu->version) in intel_pmu_refresh()
281 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters, in intel_pmu_refresh()
283 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1; in intel_pmu_refresh()
284 pmu->available_event_types = ~entry->ebx & in intel_pmu_refresh()
287 if (pmu->version == 1) { in intel_pmu_refresh()
288 pmu->nr_arch_fixed_counters = 0; in intel_pmu_refresh()
290 pmu->nr_arch_fixed_counters = in intel_pmu_refresh()
293 pmu->counter_bitmask[KVM_PMC_FIXED] = in intel_pmu_refresh()
297 pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | in intel_pmu_refresh()
298 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); in intel_pmu_refresh()
299 pmu->global_ctrl_mask = ~pmu->global_ctrl; in intel_pmu_refresh()
305 pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED; in intel_pmu_refresh()
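
intel_pmu_refresh() derives the whole vPMU shape from guest CPUID leaf 0xA: EAX packs the version (bits 7:0), GP counter count (15:8) and GP width (23:16); EDX packs the fixed counter count (4:0) and fixed width (12:5). Each counter_bitmask is then ((1 << width) - 1), global_ctrl gets one valid bit per counter with fixed counters starting at bit 32, and fixed counters only exist for version > 1, matching the listing. A decode sketch over made-up CPUID values:

    #include <stdint.h>
    #include <stdio.h>

    #define INTEL_PMC_IDX_FIXED 32

    int main(void)
    {
        /* hypothetical guest CPUID.0xA: version 2, 4 GP counters x 48 bits,
         * 3 fixed counters x 48 bits */
        uint32_t eax = (48u << 16) | (4u << 8) | 2u;  /* width|num|version */
        uint32_t edx = (48u << 5) | 3u;               /* fixed width|num */

        int version  = eax & 0xff;
        int nr_gp    = (eax >> 8) & 0xff;
        int gp_width = (eax >> 16) & 0xff;
        int nr_fixed = (version > 1) ? (int)(edx & 0x1f) : 0;
        int fx_width = (edx >> 5) & 0xff;

        uint64_t gp_mask = ((uint64_t)1 << gp_width) - 1;
        uint64_t fx_mask = ((uint64_t)1 << fx_width) - 1;
        uint64_t global_ctrl = ((1ull << nr_gp) - 1) |
                               (((1ull << nr_fixed) - 1) << INTEL_PMC_IDX_FIXED);

        printf("v%d: %d GP (mask %#llx), %d fixed (mask %#llx)\n",
               version, nr_gp, (unsigned long long)gp_mask,
               nr_fixed, (unsigned long long)fx_mask);
        printf("GLOBAL_CTRL valid bits: %#llx\n",
               (unsigned long long)global_ctrl);
        return 0;
    }
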
311 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_init() local
314 pmu->gp_counters[i].type = KVM_PMC_GP; in intel_pmu_init()
315 pmu->gp_counters[i].vcpu = vcpu; in intel_pmu_init()
316 pmu->gp_counters[i].idx = i; in intel_pmu_init()
320 pmu->fixed_counters[i].type = KVM_PMC_FIXED; in intel_pmu_init()
321 pmu->fixed_counters[i].vcpu = vcpu; in intel_pmu_init()
322 pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED; in intel_pmu_init()
328 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in intel_pmu_reset() local
332 struct kvm_pmc *pmc = &pmu->gp_counters[i]; in intel_pmu_reset()
339 pmc_stop_counter(&pmu->fixed_counters[i]); in intel_pmu_reset()
341 pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = in intel_pmu_reset()
342 pmu->global_ovf_ctrl = 0; in intel_pmu_reset()