Lines Matching refs:msr
202 static void shared_msr_update(unsigned slot, u32 msr) in shared_msr_update() argument
214 rdmsrl_safe(msr, &value); in shared_msr_update()
219 void kvm_define_shared_msr(unsigned slot, u32 msr) in kvm_define_shared_msr() argument
224 shared_msrs_global.msrs[slot] = msr; in kvm_define_shared_msr()
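
The two entries above belong to KVM's shared-MSR bookkeeping: kvm_define_shared_msr() records which MSR occupies a global slot, and shared_msr_update() snapshots the host's current value with rdmsrl_safe() so it can be restored before returning to userspace. A minimal sketch of that slot registry follows; the structure and helper names here are illustrative stand-ins, not the kernel's own.

  #include <stdint.h>

  #define KVM_NR_SHARED_MSRS 16

  /* Illustrative registry, not the kernel's structures: one global table
   * mapping slot -> MSR index, plus a host-value snapshot per slot. */
  struct shared_msr_registry {
          unsigned nr;
          uint32_t msrs[KVM_NR_SHARED_MSRS];
  };

  static struct shared_msr_registry shared_global;
  static uint64_t host_value[KVM_NR_SHARED_MSRS];

  /* Mirrors the intent of kvm_define_shared_msr(): claim a slot for an MSR. */
  void define_shared_msr(unsigned slot, uint32_t msr)
  {
          if (slot >= KVM_NR_SHARED_MSRS)
                  return;
          shared_global.msrs[slot] = msr;
          if (slot >= shared_global.nr)
                  shared_global.nr = slot + 1;
  }

  /* Mirrors the intent of shared_msr_update(): remember the host's current
   * value for this slot so it can be restored later (stub read here). */
  void update_shared_msr(unsigned slot, uint64_t current_host_value)
  {
          if (slot < KVM_NR_SHARED_MSRS)
                  host_value[slot] = current_host_value;
  }
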
1014 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_set_msr() argument
1016 switch (msr->index) { in kvm_set_msr()
1022 if (is_noncanonical_address(msr->data)) in kvm_set_msr()
1039 msr->data = get_canonical(msr->data); in kvm_set_msr()
1041 return kvm_x86_ops->set_msr(vcpu, msr); in kvm_set_msr()
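
Lines 1022 and 1039 are the interesting part of kvm_set_msr(): MSRs that hold linear addresses (segment bases, SYSCALL/SYSENTER targets and similar) are either rejected or silently canonicalized before being handed to the vendor-specific set_msr hook, so a guest cannot plant a non-canonical value that would later fault in host context. The helpers below are a standalone illustration assuming 48-bit virtual addresses; the kernel's own is_noncanonical_address()/get_canonical() may be defined differently.

  #include <stdint.h>
  #include <stdbool.h>

  /* With 48-bit virtual addresses, a linear address is canonical when bits
   * 63:47 all equal bit 47, i.e. sign-extending the low 48 bits gives the
   * value back unchanged. */
  static uint64_t canonicalize(uint64_t la)
  {
          uint64_t sign = 1ULL << 47;          /* sign-extend from bit 47 */

          return ((la & 0x0000ffffffffffffULL) ^ sign) - sign;
  }

  static bool is_noncanonical(uint64_t la)
  {
          return canonicalize(la) != la;
  }

  /* 0x00007fffffffffff is canonical and passes; 0x0000800000000000 is the
   * first non-canonical value and is either rejected or sign-extended to
   * 0xffff800000000000, depending on which branch the MSR falls into. */
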
1050 struct msr_data msr; in do_set_msr() local
1052 msr.data = *data; in do_set_msr()
1053 msr.index = index; in do_set_msr()
1054 msr.host_initiated = true; in do_set_msr()
1055 return kvm_set_msr(vcpu, &msr); in do_set_msr()
1293 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_write_tsc() argument
1301 u64 data = msr->data; in kvm_write_tsc()
1400 if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) in kvm_write_tsc()
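
do_set_msr() marks its writes host_initiated, and kvm_write_tsc() (line 1400) uses that flag to decide whether a TSC write should also be reflected into IA32_TSC_ADJUST: only a write performed by the guest itself moves the adjust MSR, while host-side restores leave it untouched. A small model of that rule, with invented struct and field names:

  #include <stdint.h>
  #include <stdbool.h>

  /* Invented names; only the host_initiated rule is the point. */
  struct tsc_state {
          bool    has_tsc_adjust;   /* CPUID advertises IA32_TSC_ADJUST */
          int64_t tsc_adjust;       /* value the guest reads back later */
  };

  /* A guest-initiated TSC write that moves the counter by 'offset' cycles is
   * mirrored into IA32_TSC_ADJUST; host-initiated writes (save/restore, the
   * post-create zeroing at line 7121) leave it alone. */
  void write_tsc(struct tsc_state *s, int64_t offset, bool host_initiated)
  {
          if (s->has_tsc_adjust && !host_initiated)
                  s->tsc_adjust += offset;
  }
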
1775 static bool msr_mtrr_valid(unsigned msr) in msr_mtrr_valid() argument
1777 switch (msr) { in msr_mtrr_valid()
1809 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) in kvm_mtrr_valid() argument
1814 if (!msr_mtrr_valid(msr)) in kvm_mtrr_valid()
1817 if (msr == MSR_IA32_CR_PAT) { in kvm_mtrr_valid()
1822 } else if (msr == MSR_MTRRdefType) { in kvm_mtrr_valid()
1826 } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { in kvm_mtrr_valid()
1834 WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); in kvm_mtrr_valid()
1837 if ((msr & 1) == 0) { in kvm_mtrr_valid()
1854 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_mtrr() argument
1858 if (!kvm_mtrr_valid(vcpu, msr, data)) in set_msr_mtrr()
1861 if (msr == MSR_MTRRdefType) { in set_msr_mtrr()
1864 } else if (msr == MSR_MTRRfix64K_00000) in set_msr_mtrr()
1866 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) in set_msr_mtrr()
1867 p[1 + msr - MSR_MTRRfix16K_80000] = data; in set_msr_mtrr()
1868 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) in set_msr_mtrr()
1869 p[3 + msr - MSR_MTRRfix4K_C0000] = data; in set_msr_mtrr()
1870 else if (msr == MSR_IA32_CR_PAT) in set_msr_mtrr()
1876 idx = (msr - 0x200) / 2; in set_msr_mtrr()
1877 is_mtrr_mask = msr - 0x200 - 2 * idx; in set_msr_mtrr()
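
The fixed-range branches above index directly off the MSR number, while the fall-through at lines 1876/1877 handles the variable-range registers: they sit as PHYSBASEn/PHYSMASKn pairs starting at MSR 0x200, so the pair index is (msr - 0x200) / 2 and the remainder says whether the base or the mask half is being written. A standalone check of that arithmetic:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          /* First four variable-range MTRR MSRs: 0x200..0x207. */
          for (uint32_t msr = 0x200; msr < 0x208; msr++) {
                  uint32_t idx = (msr - 0x200) / 2;
                  uint32_t is_mtrr_mask = msr - 0x200 - 2 * idx;

                  printf("msr 0x%03x -> var range %u, %s\n",
                         msr, idx, is_mtrr_mask ? "PHYSMASK" : "PHYSBASE");
          }
          return 0;
  }
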
1891 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_mce() argument
1896 switch (msr) { in set_msr_mce()
1908 if (msr >= MSR_IA32_MC0_CTL && in set_msr_mce()
1909 msr < MSR_IA32_MCx_CTL(bank_num)) { in set_msr_mce()
1910 u32 offset = msr - MSR_IA32_MC0_CTL; in set_msr_mce()
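
set_msr_mce() (and get_msr_mce() further down) treat the machine-check bank registers as one flat range: each bank owns four consecutive MSRs (CTL, STATUS, ADDR, MISC) starting at MSR_IA32_MC0_CTL, so the offset computed at line 1910 both bounds-checks the access against bank_num and picks the slot in the per-vcpu bank array. An illustrative decode, using the architectural 0x400 value of MC0_CTL:

  #include <stdint.h>
  #include <stdio.h>

  #define MC0_CTL 0x400u                     /* MSR_IA32_MC0_CTL */
  #define MCx_CTL(bank) (MC0_CTL + 4u * (bank))

  int main(void)
  {
          const char *reg_name[4] = { "CTL", "STATUS", "ADDR", "MISC" };
          uint32_t bank_num = 2, msr;

          for (msr = MC0_CTL; msr < MCx_CTL(bank_num); msr++) {
                  uint32_t offset = msr - MC0_CTL;

                  printf("msr 0x%03x -> bank %u %s (flat index %u)\n",
                         msr, offset / 4, reg_name[offset & 3], offset);
          }
          return 0;
  }
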
1963 static bool kvm_hv_msr_partition_wide(u32 msr) in kvm_hv_msr_partition_wide() argument
1966 switch (msr) { in kvm_hv_msr_partition_wide()
1978 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_hyperv_pw() argument
1982 switch (msr) { in set_msr_hyperv_pw()
2029 "data 0x%llx\n", msr, data); in set_msr_hyperv_pw()
2035 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_hyperv() argument
2037 switch (msr) { in set_msr_hyperv()
2068 "data 0x%llx\n", msr, data); in set_msr_hyperv()
2139 u32 msr = msr_info->index; in kvm_set_msr_common() local
2142 switch (msr) { in kvm_set_msr_common()
2184 return set_msr_mtrr(vcpu, msr, data); in kvm_set_msr_common()
2188 return kvm_x2apic_msr_write(vcpu, msr, data); in kvm_set_msr_common()
2217 bool tmp = (msr == MSR_KVM_SYSTEM_TIME); in kvm_set_msr_common()
2277 return set_msr_mce(vcpu, msr, data); in kvm_set_msr_common()
2292 "0x%x data 0x%llx\n", msr, data); in kvm_set_msr_common()
2302 "0x%x data 0x%llx\n", msr, data); in kvm_set_msr_common()
2309 if (kvm_pmu_msr(vcpu, msr)) in kvm_set_msr_common()
2314 "0x%x data 0x%llx\n", msr, data); in kvm_set_msr_common()
2327 if (kvm_hv_msr_partition_wide(msr)) { in kvm_set_msr_common()
2330 r = set_msr_hyperv_pw(vcpu, msr, data); in kvm_set_msr_common()
2334 return set_msr_hyperv(vcpu, msr, data); in kvm_set_msr_common()
2340 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); in kvm_set_msr_common()
2353 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
2355 if (kvm_pmu_msr(vcpu, msr)) in kvm_set_msr_common()
2359 msr, data); in kvm_set_msr_common()
2363 msr, data); in kvm_set_msr_common()
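
The tail of kvm_set_msr_common() (lines 2340-2363) is the catch-all: an MSR that matches the Xen HVM config page or a PMU register is routed to its handler, and anything else either fails the write, which the callers turn into a #GP for the guest, or, when KVM's ignore_msrs module parameter is set, is logged with vcpu_unimpl() and silently dropped. Roughly, under those assumptions:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Model of the catch-all policy; 'ignore_msrs' mirrors KVM's module
   * parameter of the same name. Returning 1 stands in for "fail the write",
   * which the caller converts into a #GP for the guest. */
  static bool ignore_msrs;

  int set_unknown_msr(uint32_t msr, uint64_t data)
  {
          if (!ignore_msrs) {
                  fprintf(stderr, "unhandled wrmsr: 0x%x data 0x%llx\n",
                          msr, (unsigned long long)data);
                  return 1;       /* guest sees #GP */
          }
          fprintf(stderr, "ignored wrmsr: 0x%x data 0x%llx\n",
                  msr, (unsigned long long)data);
          return 0;               /* write silently dropped */
  }

The read side (kvm_get_msr_common, lines 2672-2678) follows the same pattern, except that an ignored rdmsr also hands back 0 as the value.
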
2383 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_mtrr() argument
2387 if (!msr_mtrr_valid(msr)) in get_msr_mtrr()
2390 if (msr == MSR_MTRRdefType) in get_msr_mtrr()
2393 else if (msr == MSR_MTRRfix64K_00000) in get_msr_mtrr()
2395 else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) in get_msr_mtrr()
2396 *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; in get_msr_mtrr()
2397 else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) in get_msr_mtrr()
2398 *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; in get_msr_mtrr()
2399 else if (msr == MSR_IA32_CR_PAT) in get_msr_mtrr()
2405 idx = (msr - 0x200) / 2; in get_msr_mtrr()
2406 is_mtrr_mask = msr - 0x200 - 2 * idx; in get_msr_mtrr()
2419 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_mce() argument
2425 switch (msr) { in get_msr_mce()
2442 if (msr >= MSR_IA32_MC0_CTL && in get_msr_mce()
2443 msr < MSR_IA32_MCx_CTL(bank_num)) { in get_msr_mce()
2444 u32 offset = msr - MSR_IA32_MC0_CTL; in get_msr_mce()
2454 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_hyperv_pw() argument
2459 switch (msr) { in get_msr_hyperv_pw()
2475 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in get_msr_hyperv_pw()
2483 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_hyperv() argument
2487 switch (msr) { in get_msr_hyperv()
2509 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in get_msr_hyperv()
2516 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in kvm_get_msr_common() argument
2520 switch (msr) { in kvm_get_msr_common()
2549 if (kvm_pmu_msr(vcpu, msr)) in kvm_get_msr_common()
2550 return kvm_pmu_get_msr(vcpu, msr, pdata); in kvm_get_msr_common()
2560 return get_msr_mtrr(vcpu, msr, pdata); in kvm_get_msr_common()
2582 return kvm_x2apic_msr_read(vcpu, msr, pdata); in kvm_get_msr_common()
2625 return get_msr_mce(vcpu, msr, pdata); in kvm_get_msr_common()
2639 if (kvm_hv_msr_partition_wide(msr)) { in kvm_get_msr_common()
2642 r = get_msr_hyperv_pw(vcpu, msr, pdata); in kvm_get_msr_common()
2646 return get_msr_hyperv(vcpu, msr, pdata); in kvm_get_msr_common()
2672 if (kvm_pmu_msr(vcpu, msr)) in kvm_get_msr_common()
2673 return kvm_pmu_get_msr(vcpu, msr, pdata); in kvm_get_msr_common()
2675 vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); in kvm_get_msr_common()
2678 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); in kvm_get_msr_common()
4957 struct msr_data msr; in emulator_set_msr() local
4959 msr.data = data; in emulator_set_msr()
4960 msr.index = msr_index; in emulator_set_msr()
4961 msr.host_initiated = false; in emulator_set_msr()
4962 return kvm_set_msr(emul_to_vcpu(ctxt), &msr); in emulator_set_msr()
7116 struct msr_data msr; in kvm_arch_vcpu_postcreate() local
7121 msr.data = 0x0; in kvm_arch_vcpu_postcreate()
7122 msr.index = MSR_IA32_TSC; in kvm_arch_vcpu_postcreate()
7123 msr.host_initiated = true; in kvm_arch_vcpu_postcreate()
7124 kvm_write_tsc(vcpu, &msr); in kvm_arch_vcpu_postcreate()