Lines matching refs: msr

1188 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)  in __find_msr_index()  argument
1193 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) in __find_msr_index()
1224 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) in find_msr_entry() argument
1228 i = __find_msr_index(vmx, msr); in find_msr_entry()
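
The two helpers above implement the guest-MSR lookup: __find_msr_index() scans vmx->guest_msrs[] for the slot whose vmx_msr_index[] entry equals the requested MSR, and find_msr_entry() turns that slot into a pointer (or NULL when the MSR is not tracked). A minimal user-space sketch of the same pattern; NR_SHARED_MSRS, msr_index[] and vcpu_state are illustrative stand-ins, not the kernel's definitions:

#include <stdint.h>

#define NR_SHARED_MSRS 4                         /* placeholder count */

struct shared_msr_entry {                        /* simplified stand-in */
	unsigned index;                          /* slot in msr_index[] */
	uint64_t data;
	uint64_t mask;
};

/* EFER, STAR, LSTAR, SYSCALL mask -- illustrative subset only. */
static const uint32_t msr_index[NR_SHARED_MSRS] = {
	0xc0000080, 0xc0000081, 0xc0000082, 0xc0000084,
};

struct vcpu_state {
	struct shared_msr_entry guest_msrs[NR_SHARED_MSRS];
	int nmsrs;                               /* number of valid entries */
};

/* Return the slot holding @msr, or -1 if it is not tracked. */
static int find_msr_index(struct vcpu_state *v, uint32_t msr)
{
	for (int i = 0; i < v->nmsrs; i++)
		if (msr_index[v->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

/* Pointer form of the lookup: NULL means "not a tracked guest MSR". */
static struct shared_msr_entry *find_entry(struct vcpu_state *v, uint32_t msr)
{
	int i = find_msr_index(v, msr);

	return i >= 0 ? &v->guest_msrs[i] : NULL;
}
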
1600 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) in clear_atomic_switch_msr() argument
1605 switch (msr) { in clear_atomic_switch_msr()
1625 if (m->guest[i].index == msr) in clear_atomic_switch_msr()
1648 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, in add_atomic_switch_msr() argument
1654 switch (msr) { in add_atomic_switch_msr()
1687 if (m->guest[i].index == msr) in add_atomic_switch_msr()
1692 "Can't add msr %x\n", msr); in add_atomic_switch_msr()
1700 m->guest[i].index = msr; in add_atomic_switch_msr()
1702 m->host[i].index = msr; in add_atomic_switch_msr()
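
clear_atomic_switch_msr() and add_atomic_switch_msr() maintain the parallel guest/host arrays that back the VMX MSR autoload lists: both search m->guest[] for a matching index, clear removes the entry, and add either updates it in place or appends, complaining when the list is full. The kernel versions also update the VMCS count fields and special-case a few MSRs (the switch statements at lines 1605 and 1654); the sketch below only models the array bookkeeping, with placeholder types:

#include <stdio.h>
#include <stdint.h>

#define NR_AUTOLOAD_MSRS 8                       /* placeholder capacity */

struct msr_autoload_entry {                      /* simplified stand-in */
	uint32_t index;
	uint64_t value;
};

struct msr_autoload {
	unsigned nr;
	struct msr_autoload_entry guest[NR_AUTOLOAD_MSRS];
	struct msr_autoload_entry host[NR_AUTOLOAD_MSRS];
};

static void clear_atomic_switch_msr(struct msr_autoload *m, uint32_t msr)
{
	unsigned i;

	for (i = 0; i < m->nr; i++)
		if (m->guest[i].index == msr)
			break;
	if (i == m->nr)
		return;                          /* not on the list */

	/* Swap the last entry into the hole and shrink both lists. */
	--m->nr;
	m->guest[i] = m->guest[m->nr];
	m->host[i] = m->host[m->nr];
}

static void add_atomic_switch_msr(struct msr_autoload *m, uint32_t msr,
				  uint64_t guest_val, uint64_t host_val)
{
	unsigned i;

	for (i = 0; i < m->nr; i++)
		if (m->guest[i].index == msr)
			break;                   /* already present: update in place */

	if (i == NR_AUTOLOAD_MSRS) {
		fprintf(stderr, "Can't add msr %x\n", msr);
		return;
	}
	if (i == m->nr)
		m->nr++;                         /* append a new entry */

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}
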
2646 struct shared_msr_entry *msr; in vmx_get_msr() local
2704 msr = find_msr_entry(to_vmx(vcpu), msr_index); in vmx_get_msr()
2705 if (msr) { in vmx_get_msr()
2706 data = msr->data; in vmx_get_msr()
2726 struct shared_msr_entry *msr; in vmx_set_msr() local
2814 msr = find_msr_entry(vmx, msr_index); in vmx_set_msr()
2815 if (msr) { in vmx_set_msr()
2816 u64 old_msr_data = msr->data; in vmx_set_msr()
2817 msr->data = data; in vmx_set_msr()
2818 if (msr - vmx->guest_msrs < vmx->save_nmsrs) { in vmx_set_msr()
2820 ret = kvm_set_shared_msr(msr->index, msr->data, in vmx_set_msr()
2821 msr->mask); in vmx_set_msr()
2824 msr->data = old_msr_data; in vmx_set_msr()
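
For MSRs without dedicated handling, vmx_set_msr() falls back to the shared-MSR table: it saves the old value, stores the new one, and, if the entry is within the first save_nmsrs entries actually loaded into hardware, pushes it out with kvm_set_shared_msr(), rolling back on failure. A sketch of that rollback pattern; set_shared_msr_stub() and the "loaded_in_hw" flag are placeholders for the kernel's kvm_set_shared_msr() and the save_nmsrs range check:

#include <stdint.h>

struct shared_msr {                              /* simplified stand-in */
	unsigned index;
	uint64_t data;
	uint64_t mask;
};

/* Placeholder for kvm_set_shared_msr(); pretend the update always works. */
static int set_shared_msr_stub(unsigned slot, uint64_t data, uint64_t mask)
{
	(void)slot; (void)data; (void)mask;
	return 0;
}

/*
 * Write-with-rollback: remember the old value so a failed hardware update
 * leaves the table consistent with what the guest last observed.
 */
static int set_guest_msr(struct shared_msr *e, int loaded_in_hw, uint64_t data)
{
	uint64_t old_msr_data = e->data;

	e->data = data;
	if (loaded_in_hw &&
	    set_shared_msr_stub(e->index, e->data, e->mask)) {
		e->data = old_msr_data;          /* roll back on failure */
		return -1;
	}
	return 0;
}
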
2860 u64 msr; in vmx_disabled_by_bios() local
2862 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); in vmx_disabled_by_bios()
2863 if (msr & FEATURE_CONTROL_LOCKED) { in vmx_disabled_by_bios()
2865 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) in vmx_disabled_by_bios()
2869 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) in vmx_disabled_by_bios()
2870 && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) in vmx_disabled_by_bios()
2877 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) in vmx_disabled_by_bios()
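
vmx_disabled_by_bios() reads IA32_FEATURE_CONTROL and, when the lock bit is set, checks whether the VMXON-enable bit matching the current SMX mode was left clear by firmware. A simplified decision sketch; the real function also special-cases tboot/TXT launches, which this sketch folds into a single in_smx flag:

#include <stdbool.h>
#include <stdint.h>

/* Architectural bits of IA32_FEATURE_CONTROL (MSR 0x3a). */
#define FC_LOCKED                 (1ull << 0)
#define FC_VMXON_ENABLED_IN_SMX   (1ull << 1)
#define FC_VMXON_ENABLED_OUT_SMX  (1ull << 2)

static bool vmx_disabled_by_firmware(uint64_t feature_control, bool in_smx)
{
	if (!(feature_control & FC_LOCKED))
		return false;                    /* unlocked: kernel may enable VMX itself */

	if (in_smx)
		return !(feature_control & FC_VMXON_ENABLED_IN_SMX);
	return !(feature_control & FC_VMXON_ENABLED_OUT_SMX);
}
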
2966 u32 msr, u32 *result) in adjust_vmx_controls() argument
2971 rdmsr(msr, vmx_msr_low, vmx_msr_high); in adjust_vmx_controls()
2984 static __init bool allow_1_setting(u32 msr, u32 ctl) in allow_1_setting() argument
2988 rdmsr(msr, vmx_msr_low, vmx_msr_high); in allow_1_setting()
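
adjust_vmx_controls() and allow_1_setting() both read a VMX capability MSR whose low 32 bits name control bits that must be 1 and whose high 32 bits name control bits that may be 1. A sketch of the adjustment, with the rdmsr() result passed in as two halves instead of being read from hardware:

#include <stdint.h>

/*
 * Start from the desired (min | opt) set, drop bits the CPU cannot set,
 * force bits it requires, and fail if a required bit was dropped.
 */
static int adjust_controls(uint32_t min, uint32_t opt,
			   uint32_t cap_low, uint32_t cap_high,
			   uint32_t *result)
{
	uint32_t ctl = min | opt;

	ctl &= cap_high;        /* 0 in the high word => must be zero */
	ctl |= cap_low;         /* 1 in the low word  => must be one  */

	if (min & ~ctl)
		return -1;      /* a required control is unavailable */

	*result = ctl;
	return 0;
}
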
3420 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); in vmx_set_efer() local
3422 if (!msr) in vmx_set_efer()
3433 msr->data = efer; in vmx_set_efer()
3437 msr->data = efer & ~EFER_LME; in vmx_set_efer()
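
vmx_set_efer() looks up the shared MSR_EFER entry and stores the guest's EFER into it, masking out LME while long mode is not active; the real function also toggles the VM-entry IA32E-mode control and rebuilds the MSR lists. A sketch of just the value that ends up in the table:

#include <stdint.h>

#define EFER_LME (1ull << 8)    /* long mode enable */
#define EFER_LMA (1ull << 10)   /* long mode active */

static uint64_t efer_shadow_value(uint64_t efer)
{
	if (efer & EFER_LMA)
		return efer;            /* long mode: keep EFER as-is */
	return efer & ~EFER_LME;        /* otherwise hide LME from hardware */
}
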
4213 u32 msr, int type) in __vmx_disable_intercept_for_msr() argument
4225 if (msr <= 0x1fff) { in __vmx_disable_intercept_for_msr()
4228 __clear_bit(msr, msr_bitmap + 0x000 / f); in __vmx_disable_intercept_for_msr()
4232 __clear_bit(msr, msr_bitmap + 0x800 / f); in __vmx_disable_intercept_for_msr()
4234 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { in __vmx_disable_intercept_for_msr()
4235 msr &= 0x1fff; in __vmx_disable_intercept_for_msr()
4238 __clear_bit(msr, msr_bitmap + 0x400 / f); in __vmx_disable_intercept_for_msr()
4242 __clear_bit(msr, msr_bitmap + 0xc00 / f); in __vmx_disable_intercept_for_msr()
4248 u32 msr, int type) in __vmx_enable_intercept_for_msr() argument
4260 if (msr <= 0x1fff) { in __vmx_enable_intercept_for_msr()
4263 __set_bit(msr, msr_bitmap + 0x000 / f); in __vmx_enable_intercept_for_msr()
4267 __set_bit(msr, msr_bitmap + 0x800 / f); in __vmx_enable_intercept_for_msr()
4269 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { in __vmx_enable_intercept_for_msr()
4270 msr &= 0x1fff; in __vmx_enable_intercept_for_msr()
4273 __set_bit(msr, msr_bitmap + 0x400 / f); in __vmx_enable_intercept_for_msr()
4277 __set_bit(msr, msr_bitmap + 0xc00 / f); in __vmx_enable_intercept_for_msr()
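
The offsets used by the two intercept helpers above describe the 4KB VMX MSR bitmap: reads of low MSRs (0x0000-0x1fff) at 0x000, reads of the 0xc0000000-0xc0001fff range at 0x400, and the corresponding write quarters at 0x800 and 0xc00. A cleared bit lets the guest access the MSR without a VM exit; a set bit forces the exit. A sketch of __vmx_disable_intercept_for_msr() over a plain byte array standing in for the bitmap page:

#include <stdint.h>

#define BM_READ_LOW    0x000    /* reads  of MSRs 0x00000000-0x00001fff */
#define BM_READ_HIGH   0x400    /* reads  of MSRs 0xc0000000-0xc0001fff */
#define BM_WRITE_LOW   0x800    /* writes of MSRs 0x00000000-0x00001fff */
#define BM_WRITE_HIGH  0xc00    /* writes of MSRs 0xc0000000-0xc0001fff */

static void bm_clear(uint8_t *page, unsigned base, uint32_t bit)
{
	page[base + bit / 8] &= (uint8_t)~(1u << (bit % 8));
}

/*
 * Low MSRs index their bitmap quarter directly; the 0xc0000000 range is
 * folded down with "msr &= 0x1fff" first.  MSRs outside both ranges are
 * always intercepted, so there is nothing to clear for them.
 */
static void disable_intercept(uint8_t *page, uint32_t msr, int read, int write)
{
	unsigned rd, wr;

	if (msr <= 0x1fff) {
		rd = BM_READ_LOW;
		wr = BM_WRITE_LOW;
	} else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
		msr &= 0x1fff;
		rd = BM_READ_HIGH;
		wr = BM_WRITE_HIGH;
	} else {
		return;
	}

	if (read)
		bm_clear(page, rd, msr);
	if (write)
		bm_clear(page, wr, msr);
}
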
4288 u32 msr, int type) in nested_vmx_disable_intercept_for_msr() argument
4302 if (msr <= 0x1fff) { in nested_vmx_disable_intercept_for_msr()
4304 !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) in nested_vmx_disable_intercept_for_msr()
4306 __clear_bit(msr, msr_bitmap_nested + 0x000 / f); in nested_vmx_disable_intercept_for_msr()
4309 !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) in nested_vmx_disable_intercept_for_msr()
4311 __clear_bit(msr, msr_bitmap_nested + 0x800 / f); in nested_vmx_disable_intercept_for_msr()
4313 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { in nested_vmx_disable_intercept_for_msr()
4314 msr &= 0x1fff; in nested_vmx_disable_intercept_for_msr()
4316 !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) in nested_vmx_disable_intercept_for_msr()
4318 __clear_bit(msr, msr_bitmap_nested + 0x400 / f); in nested_vmx_disable_intercept_for_msr()
4321 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) in nested_vmx_disable_intercept_for_msr()
4323 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); in nested_vmx_disable_intercept_for_msr()
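
nested_vmx_disable_intercept_for_msr() applies the same layout but adds the nested rule visible in the test_bit() calls above: a bit in the merged bitmap used for the L2 guest may only be cleared when L1's own bitmap would not have intercepted that access, so L1 still receives the VM exits it asked for. A sketch of that rule for a single bit; "base" is one of the quarter offsets from the previous sketch:

#include <stdint.h>

static int bit_is_set(const uint8_t *page, unsigned base, uint32_t bit)
{
	return page[base + bit / 8] & (1u << (bit % 8));
}

static void clear_bit_at(uint8_t *page, unsigned base, uint32_t bit)
{
	page[base + bit / 8] &= (uint8_t)~(1u << (bit % 8));
}

/* Pass the access through to L2 only if L1 does not intercept it. */
static void nested_disable_intercept(const uint8_t *bitmap_l1,
				     uint8_t *bitmap_merged,
				     unsigned base, uint32_t bit)
{
	if (!bit_is_set(bitmap_l1, base, bit))
		clear_bit_at(bitmap_merged, base, bit);
}
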
4328 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) in vmx_disable_intercept_for_msr() argument
4332 msr, MSR_TYPE_R | MSR_TYPE_W); in vmx_disable_intercept_for_msr()
4334 msr, MSR_TYPE_R | MSR_TYPE_W); in vmx_disable_intercept_for_msr()
4337 static void vmx_enable_intercept_msr_read_x2apic(u32 msr) in vmx_enable_intercept_msr_read_x2apic() argument
4340 msr, MSR_TYPE_R); in vmx_enable_intercept_msr_read_x2apic()
4342 msr, MSR_TYPE_R); in vmx_enable_intercept_msr_read_x2apic()
4345 static void vmx_disable_intercept_msr_read_x2apic(u32 msr) in vmx_disable_intercept_msr_read_x2apic() argument
4348 msr, MSR_TYPE_R); in vmx_disable_intercept_msr_read_x2apic()
4350 msr, MSR_TYPE_R); in vmx_disable_intercept_msr_read_x2apic()
4353 static void vmx_disable_intercept_msr_write_x2apic(u32 msr) in vmx_disable_intercept_msr_write_x2apic() argument
4356 msr, MSR_TYPE_W); in vmx_disable_intercept_msr_write_x2apic()
4358 msr, MSR_TYPE_W); in vmx_disable_intercept_msr_write_x2apic()
5518 struct msr_data msr; in handle_wrmsr() local
5523 msr.data = data; in handle_wrmsr()
5524 msr.index = ecx; in handle_wrmsr()
5525 msr.host_initiated = false; in handle_wrmsr()
5526 if (kvm_set_msr(vcpu, &msr) != 0) { in handle_wrmsr()
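
handle_wrmsr() marshals the guest's WRMSR operands into a struct msr_data before calling kvm_set_msr(): ECX selects the MSR, EDX:EAX carry the 64-bit value, and host_initiated = false marks the write as coming from the guest rather than from userspace. A sketch of just that marshalling, with a stand-in struct:

#include <stdbool.h>
#include <stdint.h>

struct msr_data_sketch {                 /* stand-in for struct msr_data */
	bool host_initiated;
	uint32_t index;
	uint64_t data;
};

static struct msr_data_sketch wrmsr_args(uint32_t ecx, uint32_t eax, uint32_t edx)
{
	struct msr_data_sketch msr = {
		.host_initiated = false,                 /* guest-originated write */
		.index = ecx,
		.data = ((uint64_t)edx << 32) | eax,     /* EDX:EAX */
	};
	return msr;
}
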
6030 int r = -ENOMEM, i, msr; in hardware_setup() local
6171 for (msr = 0x800; msr <= 0x8ff; msr++) in hardware_setup()
6172 vmx_disable_intercept_msr_read_x2apic(msr); in hardware_setup()
8160 clear_atomic_switch_msr(vmx, msrs[i].msr); in atomic_switch_perf_msrs()
8162 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, in atomic_switch_perf_msrs()
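
atomic_switch_perf_msrs() is a user of the add/clear helpers sketched earlier: for each perf MSR it drops the entry from the atomic-switch lists when guest and host want the same value, and adds it when they differ so the CPU swaps the value on every entry and exit. A sketch reusing the stand-in msr_autoload types and helpers from the earlier add/clear sketch (perf_msr_desc is likewise a placeholder, not kernel API):

struct perf_msr_desc {
	uint32_t msr;
	uint64_t guest, host;
};

static void switch_perf_msrs(struct msr_autoload *m,
			     const struct perf_msr_desc *msrs, int nr)
{
	for (int i = 0; i < nr; i++) {
		if (msrs[i].guest == msrs[i].host)
			clear_atomic_switch_msr(m, msrs[i].msr);
		else
			add_atomic_switch_msr(m, msrs[i].msr,
					      msrs[i].guest, msrs[i].host);
	}
}
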
8786 int msr; in nested_vmx_merge_msr_bitmap() local
8807 for (msr = 0x800; msr <= 0x8ff; msr++) in nested_vmx_merge_msr_bitmap()
8811 msr, MSR_TYPE_R); in nested_vmx_merge_msr_bitmap()
8837 for (msr = 0x800; msr <= 0x8ff; msr++) in nested_vmx_merge_msr_bitmap()
8840 msr, in nested_vmx_merge_msr_bitmap()
8989 struct msr_data msr; in nested_vmx_load_msr() local
8991 msr.host_initiated = false; in nested_vmx_load_msr()
9006 msr.index = e.index; in nested_vmx_load_msr()
9007 msr.data = e.value; in nested_vmx_load_msr()
9008 if (kvm_set_msr(vcpu, &msr)) { in nested_vmx_load_msr()
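
nested_vmx_load_msr() walks the VM-entry MSR-load area described by vmcs12, reads each entry from guest memory, and applies it with a guest-originated kvm_set_msr(); on the first failure it returns the 1-based index of the offending entry, matching the VM-entry MSR-load error reporting convention. A sketch of the loop with the guest-memory read and the MSR write abstracted behind placeholder callbacks:

#include <stdint.h>

struct vmx_msr_entry_sketch {            /* stand-in for struct vmx_msr_entry */
	uint32_t index;
	uint32_t reserved;
	uint64_t value;
};

/* Placeholders for reading an entry out of guest memory and applying it. */
typedef int (*read_entry_fn)(uint64_t gpa, struct vmx_msr_entry_sketch *e);
typedef int (*set_msr_fn)(uint32_t index, uint64_t value);

/* Returns 0 on success, or i + 1 for the first entry that failed. */
static uint32_t load_msr_list(uint64_t gpa, uint32_t count,
			      read_entry_fn read_entry, set_msr_fn set_msr)
{
	struct vmx_msr_entry_sketch e;

	for (uint32_t i = 0; i < count; i++) {
		if (read_entry(gpa + i * sizeof(e), &e))
			return i + 1;
		if (set_msr(e.index, e.value))
			return i + 1;
	}
	return 0;
}
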