Lines matching refs: msr

1262 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)  in __find_msr_index()  argument
1267 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) in __find_msr_index()
1298 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) in find_msr_entry() argument
1302 i = __find_msr_index(vmx, msr); in find_msr_entry()
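
The first two hits are the guest-MSR lookup pair: __find_msr_index() scans the vCPU's guest_msrs[] array for the slot whose vmx_msr_index[] entry matches the requested MSR number, and find_msr_entry() turns that slot into a pointer (or NULL when the MSR is not tracked). A minimal user-space sketch of the same two-step lookup, with hypothetical table and array names standing in for the kernel structures:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for the kernel's vmx_msr_index[] and guest_msrs[]. */
    struct shared_msr_entry { unsigned index; uint64_t data; uint64_t mask; };

    static const uint32_t msr_index_table[] = { 0xc0000080u /* EFER */, 0xc0000081u /* STAR */ };
    static struct shared_msr_entry guest_msrs[] = { { .index = 0 }, { .index = 1 } };
    static const size_t nr_guest_msrs = sizeof(guest_msrs) / sizeof(guest_msrs[0]);

    /* Mirrors __find_msr_index(): return the slot tracking 'msr', or -1. */
    static int find_msr_index(uint32_t msr)
    {
            for (size_t i = 0; i < nr_guest_msrs; i++)
                    if (msr_index_table[guest_msrs[i].index] == msr)
                            return (int)i;
            return -1;
    }

    /* Mirrors find_msr_entry(): convert the slot into an entry pointer, or NULL. */
    static struct shared_msr_entry *find_msr_entry_sketch(uint32_t msr)
    {
            int i = find_msr_index(msr);
            return i >= 0 ? &guest_msrs[i] : NULL;
    }
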
1674 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) in clear_atomic_switch_msr() argument
1679 switch (msr) { in clear_atomic_switch_msr()
1699 if (m->guest[i].index == msr) in clear_atomic_switch_msr()
1722 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, in add_atomic_switch_msr() argument
1728 switch (msr) { in add_atomic_switch_msr()
1761 if (m->guest[i].index == msr) in add_atomic_switch_msr()
1766 "Can't add msr %x\n", msr); in add_atomic_switch_msr()
1774 m->guest[i].index = msr; in add_atomic_switch_msr()
1776 m->host[i].index = msr; in add_atomic_switch_msr()
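
clear_atomic_switch_msr() and add_atomic_switch_msr() maintain the per-vCPU MSR autoload lists: parallel guest/host arrays of {index, value} pairs that the CPU loads atomically on VM entry and VM exit. The add path reuses an existing slot for the same MSR or appends a new one, and complains ("Can't add msr %x") when the list is full. A simplified sketch of that slot management, with a hypothetical capacity constant and without the VMCS count updates the kernel also performs:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_AUTOLOAD_SLOTS 8             /* hypothetical capacity, not the kernel's constant */

    struct autoload_entry { uint32_t index; uint64_t value; };

    static struct autoload_entry guest_list[NR_AUTOLOAD_SLOTS];
    static struct autoload_entry host_list[NR_AUTOLOAD_SLOTS];
    static unsigned nr_autoload;

    /* Mirrors the add path: reuse the slot already holding 'msr', else append. */
    static void add_atomic_switch_msr_sketch(uint32_t msr, uint64_t guest_val, uint64_t host_val)
    {
            unsigned i;

            for (i = 0; i < nr_autoload; i++)
                    if (guest_list[i].index == msr)
                            break;

            if (i == nr_autoload) {
                    if (nr_autoload == NR_AUTOLOAD_SLOTS) {
                            fprintf(stderr, "Can't add msr %x\n", msr);
                            return;
                    }
                    nr_autoload++;
            }

            guest_list[i].index = msr;
            guest_list[i].value = guest_val;
            host_list[i].index  = msr;
            host_list[i].value  = host_val;
    }
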
2785 struct shared_msr_entry *msr; in vmx_get_msr() local
2838 msr = find_msr_entry(to_vmx(vcpu), msr_info->index); in vmx_get_msr()
2839 if (msr) { in vmx_get_msr()
2840 msr_info->data = msr->data; in vmx_get_msr()
2859 struct shared_msr_entry *msr; in vmx_set_msr() local
2947 msr = find_msr_entry(vmx, msr_index); in vmx_set_msr()
2948 if (msr) { in vmx_set_msr()
2949 u64 old_msr_data = msr->data; in vmx_set_msr()
2950 msr->data = data; in vmx_set_msr()
2951 if (msr - vmx->guest_msrs < vmx->save_nmsrs) { in vmx_set_msr()
2953 ret = kvm_set_shared_msr(msr->index, msr->data, in vmx_set_msr()
2954 msr->mask); in vmx_set_msr()
2957 msr->data = old_msr_data; in vmx_set_msr()
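
In vmx_get_msr() the shared-MSR path simply returns msr->data; vmx_set_msr() is more careful: it saves the old value, writes the new one, and if the slot is within the first save_nmsrs entries (the ones the shared-MSR machinery actually propagates) it pushes the value out through kvm_set_shared_msr() and rolls back on failure. The rollback pattern, sketched with a hypothetical set_shared_msr() hook in place of the kernel call:

    #include <stdint.h>

    struct shared_msr_entry { unsigned index; uint64_t data; uint64_t mask; };

    /* Hypothetical hook standing in for kvm_set_shared_msr(); nonzero means failure. */
    static int set_shared_msr(unsigned index, uint64_t data, uint64_t mask)
    {
            (void)index; (void)data; (void)mask;
            return 0;
    }

    /* Mirrors the vmx_set_msr() tail: update the slot, push it out, undo on error. */
    static int set_guest_msr(struct shared_msr_entry *msr, int slot, int save_nmsrs, uint64_t data)
    {
            uint64_t old_msr_data = msr->data;
            int ret = 0;

            msr->data = data;
            if (slot < save_nmsrs) {
                    ret = set_shared_msr(msr->index, msr->data, msr->mask);
                    if (ret)
                            msr->data = old_msr_data;       /* keep the previous, consistent value */
            }
            return ret;
    }
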
2993 u64 msr; in vmx_disabled_by_bios() local
2995 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); in vmx_disabled_by_bios()
2996 if (msr & FEATURE_CONTROL_LOCKED) { in vmx_disabled_by_bios()
2998 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) in vmx_disabled_by_bios()
3002 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) in vmx_disabled_by_bios()
3003 && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) in vmx_disabled_by_bios()
3010 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) in vmx_disabled_by_bios()
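
vmx_disabled_by_bios() reads IA32_FEATURE_CONTROL and decides whether firmware has left VMX usable: once the lock bit (bit 0) is set the MSR can no longer be changed, so VMX is only available if the matching enable bit is also set, VMXON-inside-SMX (bit 1) when running under tboot/SMX and VMXON-outside-SMX (bit 2) otherwise. A pure-logic sketch of that check on a raw MSR value; the bit positions are the architectural ones, and the tboot handling is deliberately simplified:

    #include <stdint.h>
    #include <stdbool.h>

    #define FEATURE_CONTROL_LOCKED                    (1ull << 0)
    #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX  (1ull << 1)
    #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1ull << 2)

    /* Simplified: true if firmware has locked VMX off for this environment. */
    static bool vmx_disabled_by_bios_sketch(uint64_t feature_control, bool in_smx)
    {
            uint64_t need = in_smx ? FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX
                                   : FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

            if (!(feature_control & FEATURE_CONTROL_LOCKED))
                    return false;   /* unlocked: the kernel can still set the enable bits itself */

            return !(feature_control & need);
    }
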
3101 u32 msr, u32 *result) in adjust_vmx_controls() argument
3106 rdmsr(msr, vmx_msr_low, vmx_msr_high); in adjust_vmx_controls()
3119 static __init bool allow_1_setting(u32 msr, u32 ctl) in allow_1_setting() argument
3123 rdmsr(msr, vmx_msr_low, vmx_msr_high); in allow_1_setting()
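
adjust_vmx_controls() and allow_1_setting() query the VMX capability MSRs (IA32_VMX_*_CTLS), where the low 32 bits report the allowed-0 settings (bits that must be 1) and the high 32 bits report the allowed-1 settings (bits that may be 1). The adjustment is two mask operations followed by a check that the required minimum controls survived; a sketch that takes the two MSR halves as plain parameters:

    #include <stdint.h>
    #include <stdbool.h>

    /*
     * ctl_min: controls we require; ctl_opt: controls we would like.
     * msr_low/msr_high: the two halves of the IA32_VMX_xxx_CTLS capability MSR.
     * Returns false if a required control is not supported.
     */
    static bool adjust_vmx_controls_sketch(uint32_t ctl_min, uint32_t ctl_opt,
                                           uint32_t msr_low, uint32_t msr_high,
                                           uint32_t *result)
    {
            uint32_t ctl = ctl_min | ctl_opt;

            ctl &= msr_high;        /* bit clear in the high half => must be 0 */
            ctl |= msr_low;         /* bit set   in the low half  => must be 1 */

            if ((ctl & ctl_min) != ctl_min)
                    return false;

            *result = ctl;
            return true;
    }

    /* Mirrors allow_1_setting(): may this control bit be set to 1 at all? */
    static bool allow_1_setting_sketch(uint32_t msr_high, uint32_t ctl)
    {
            return (msr_high & ctl) == ctl;
    }
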
3557 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); in vmx_set_efer() local
3559 if (!msr) in vmx_set_efer()
3570 msr->data = efer; in vmx_set_efer()
3574 msr->data = efer & ~EFER_LME; in vmx_set_efer()
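
vmx_set_efer() stores the guest EFER through the shared-MSR slot found by find_msr_entry(vmx, MSR_EFER): when the guest is in IA-32e mode the value is kept as-is, otherwise EFER.LME is masked off so the image handed to the shared-MSR machinery matches a non-long-mode configuration (the listing omits the surrounding VM-entry control update). A condensed sketch of that branch, using the architectural EFER bit positions:

    #include <stdint.h>
    #include <stddef.h>

    #define EFER_LME (1ull << 8)    /* long mode enable */
    #define EFER_LMA (1ull << 10)   /* long mode active */

    struct shared_msr_entry { unsigned index; uint64_t data; uint64_t mask; };

    /* Condensed: pick the EFER image handed to the shared-MSR machinery. */
    static void set_efer_sketch(struct shared_msr_entry *msr, uint64_t efer)
    {
            if (!msr)
                    return;                 /* EFER is not tracked as a shared MSR here */

            if (efer & EFER_LMA)
                    msr->data = efer;
            else
                    msr->data = efer & ~EFER_LME;
    }
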
4345 u32 msr, int type) in __vmx_disable_intercept_for_msr() argument
4357 if (msr <= 0x1fff) { in __vmx_disable_intercept_for_msr()
4360 __clear_bit(msr, msr_bitmap + 0x000 / f); in __vmx_disable_intercept_for_msr()
4364 __clear_bit(msr, msr_bitmap + 0x800 / f); in __vmx_disable_intercept_for_msr()
4366 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { in __vmx_disable_intercept_for_msr()
4367 msr &= 0x1fff; in __vmx_disable_intercept_for_msr()
4370 __clear_bit(msr, msr_bitmap + 0x400 / f); in __vmx_disable_intercept_for_msr()
4374 __clear_bit(msr, msr_bitmap + 0xc00 / f); in __vmx_disable_intercept_for_msr()
4380 u32 msr, int type) in __vmx_enable_intercept_for_msr() argument
4392 if (msr <= 0x1fff) { in __vmx_enable_intercept_for_msr()
4395 __set_bit(msr, msr_bitmap + 0x000 / f); in __vmx_enable_intercept_for_msr()
4399 __set_bit(msr, msr_bitmap + 0x800 / f); in __vmx_enable_intercept_for_msr()
4401 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { in __vmx_enable_intercept_for_msr()
4402 msr &= 0x1fff; in __vmx_enable_intercept_for_msr()
4405 __set_bit(msr, msr_bitmap + 0x400 / f); in __vmx_enable_intercept_for_msr()
4409 __set_bit(msr, msr_bitmap + 0xc00 / f); in __vmx_enable_intercept_for_msr()
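
The __vmx_disable_intercept_for_msr()/__vmx_enable_intercept_for_msr() pair manipulates the 4 KiB VMX MSR bitmap, which is split into four 1 KiB regions: reads of low MSRs (0x00000000-0x00001fff) at offset 0x000, reads of high MSRs (0xc0000000-0xc0001fff) at 0x400, writes of low MSRs at 0x800, and writes of high MSRs at 0xc00. A set bit means the access causes a VM exit; a clear bit passes it through. A sketch of the same offset-and-bit arithmetic on a plain byte array:

    #include <stdint.h>
    #include <stdbool.h>

    /* One 4 KiB VMX MSR bitmap: bit set => VM exit on access, bit clear => pass through. */
    typedef uint8_t msr_bitmap_t[4096];

    static void set_msr_bit(uint8_t *base, uint32_t bit, bool intercept)
    {
            if (intercept)
                    base[bit / 8] |=  (uint8_t)(1u << (bit % 8));
            else
                    base[bit / 8] &= (uint8_t)~(1u << (bit % 8));
    }

    /* Mirrors the kernel helpers: pick the read/write region for low or high MSRs. */
    static void set_msr_intercept(msr_bitmap_t bitmap, uint32_t msr,
                                  bool read, bool write, bool intercept)
    {
            if (msr <= 0x1fff) {                    /* low MSR range */
                    if (read)
                            set_msr_bit(bitmap + 0x000, msr, intercept);
                    if (write)
                            set_msr_bit(bitmap + 0x800, msr, intercept);
            } else if (msr >= 0xc0000000u && msr <= 0xc0001fffu) {
                    uint32_t bit = msr & 0x1fff;    /* high MSR range */
                    if (read)
                            set_msr_bit(bitmap + 0x400, bit, intercept);
                    if (write)
                            set_msr_bit(bitmap + 0xc00, bit, intercept);
            }                                       /* MSRs outside both ranges always exit */
    }
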
4420 u32 msr, int type) in nested_vmx_disable_intercept_for_msr() argument
4434 if (msr <= 0x1fff) { in nested_vmx_disable_intercept_for_msr()
4436 !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) in nested_vmx_disable_intercept_for_msr()
4438 __clear_bit(msr, msr_bitmap_nested + 0x000 / f); in nested_vmx_disable_intercept_for_msr()
4441 !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) in nested_vmx_disable_intercept_for_msr()
4443 __clear_bit(msr, msr_bitmap_nested + 0x800 / f); in nested_vmx_disable_intercept_for_msr()
4445 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { in nested_vmx_disable_intercept_for_msr()
4446 msr &= 0x1fff; in nested_vmx_disable_intercept_for_msr()
4448 !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) in nested_vmx_disable_intercept_for_msr()
4450 __clear_bit(msr, msr_bitmap_nested + 0x400 / f); in nested_vmx_disable_intercept_for_msr()
4453 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) in nested_vmx_disable_intercept_for_msr()
4455 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); in nested_vmx_disable_intercept_for_msr()
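
nested_vmx_disable_intercept_for_msr() implements the merge rule for nested guests: assuming the merged bitmap used for L2 starts out intercepting everything (as the merge code prepares it), an intercept is removed only where L1's own bitmap also leaves the bit clear, so an L2 access is passed straight through only if both hypervisors agree not to intercept it. The rule for one low-range read bit, sketched standalone:

    #include <stdint.h>
    #include <stdbool.h>

    static bool bit_is_set(const uint8_t *base, uint32_t bit)
    {
            return base[bit / 8] & (1u << (bit % 8));
    }

    static void clear_bit8(uint8_t *base, uint32_t bit)
    {
            base[bit / 8] &= (uint8_t)~(1u << (bit % 8));
    }

    /*
     * Merge rule for one low-MSR read bit: the combined bitmap (initially
     * all-intercept) is relaxed only where L1 also does not intercept.
     */
    static void nested_disable_read_intercept_sketch(const uint8_t *bitmap_l1,
                                                     uint8_t *bitmap_merged,
                                                     uint32_t msr)
    {
            if (msr > 0x1fff)
                    return;                         /* low range only in this sketch */

            if (!bit_is_set(bitmap_l1 + 0x000, msr))
                    clear_bit8(bitmap_merged + 0x000, msr);
    }
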
4460 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) in vmx_disable_intercept_for_msr() argument
4464 msr, MSR_TYPE_R | MSR_TYPE_W); in vmx_disable_intercept_for_msr()
4466 msr, MSR_TYPE_R | MSR_TYPE_W); in vmx_disable_intercept_for_msr()
4469 static void vmx_enable_intercept_msr_read_x2apic(u32 msr) in vmx_enable_intercept_msr_read_x2apic() argument
4472 msr, MSR_TYPE_R); in vmx_enable_intercept_msr_read_x2apic()
4474 msr, MSR_TYPE_R); in vmx_enable_intercept_msr_read_x2apic()
4477 static void vmx_disable_intercept_msr_read_x2apic(u32 msr) in vmx_disable_intercept_msr_read_x2apic() argument
4480 msr, MSR_TYPE_R); in vmx_disable_intercept_msr_read_x2apic()
4482 msr, MSR_TYPE_R); in vmx_disable_intercept_msr_read_x2apic()
4485 static void vmx_disable_intercept_msr_write_x2apic(u32 msr) in vmx_disable_intercept_msr_write_x2apic() argument
4488 msr, MSR_TYPE_W); in vmx_disable_intercept_msr_write_x2apic()
4490 msr, MSR_TYPE_W); in vmx_disable_intercept_msr_write_x2apic()
5664 struct msr_data msr; in handle_wrmsr() local
5669 msr.data = data; in handle_wrmsr()
5670 msr.index = ecx; in handle_wrmsr()
5671 msr.host_initiated = false; in handle_wrmsr()
5672 if (kvm_set_msr(vcpu, &msr) != 0) { in handle_wrmsr()
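
handle_wrmsr() is the WRMSR exit handler: the MSR number comes from guest ECX and the 64-bit payload from EDX:EAX, everything is packed into a struct msr_data with host_initiated = false (so the common code applies guest-visible checks), and a nonzero return from kvm_set_msr() turns into an injected #GP. The register packing, sketched with hypothetical raw register values and a local copy of the relevant fields:

    #include <stdint.h>
    #include <stdbool.h>

    /* Shape of the request handed to the common MSR code (field names as in the listing). */
    struct msr_data_sketch {
            bool     host_initiated;
            uint32_t index;
            uint64_t data;
    };

    /* Mirrors the handler's setup: ECX selects the MSR, EDX:EAX carry the value. */
    static struct msr_data_sketch build_wrmsr_request(uint64_t rax, uint64_t rcx, uint64_t rdx)
    {
            struct msr_data_sketch msr;

            msr.data  = (uint32_t)rax | ((uint64_t)(uint32_t)rdx << 32);
            msr.index = (uint32_t)rcx;
            msr.host_initiated = false;     /* guest-originated write, not from userspace */
            return msr;
    }
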
6112 int r = -ENOMEM, i, msr; in hardware_setup() local
6259 for (msr = 0x800; msr <= 0x8ff; msr++) in hardware_setup()
6260 vmx_disable_intercept_msr_read_x2apic(msr); in hardware_setup()
8521 clear_atomic_switch_msr(vmx, msrs[i].msr); in atomic_switch_perf_msrs()
8523 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, in atomic_switch_perf_msrs()
9183 int msr; in nested_vmx_merge_msr_bitmap() local
9204 for (msr = 0x800; msr <= 0x8ff; msr++) in nested_vmx_merge_msr_bitmap()
9208 msr, MSR_TYPE_R); in nested_vmx_merge_msr_bitmap()
9234 for (msr = 0x800; msr <= 0x8ff; msr++) in nested_vmx_merge_msr_bitmap()
9237 msr, in nested_vmx_merge_msr_bitmap()
9386 struct msr_data msr; in nested_vmx_load_msr() local
9388 msr.host_initiated = false; in nested_vmx_load_msr()
9403 msr.index = e.index; in nested_vmx_load_msr()
9404 msr.data = e.value; in nested_vmx_load_msr()
9405 if (kvm_set_msr(vcpu, &msr)) { in nested_vmx_load_msr()
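
nested_vmx_load_msr() processes the VM-entry MSR-load area that L1 hands to L2: each entry is an {index, value} pair read from guest memory and replayed through kvm_set_msr() with host_initiated = false, and a failing entry aborts the loop so the caller can report a VM-entry failure at that point. A simplified, memory-access-free sketch of the loop; the entry layout and the 1-based failure position follow the VMX MSR-load area convention, and the kernel's guest-memory reads and validity checks are omitted:

    #include <stdint.h>
    #include <stdbool.h>

    /* One entry of a VMX MSR-load area (index, reserved, value). */
    struct vmx_msr_entry_sketch {
            uint32_t index;
            uint32_t reserved;
            uint64_t value;
    };

    /* Hypothetical hook standing in for kvm_set_msr(); nonzero means the write failed. */
    static int set_msr_sketch(uint32_t index, uint64_t value)
    {
            (void)index; (void)value;
            return 0;
    }

    /*
     * Replay 'count' entries; return 0 on success or the 1-based position of the
     * entry that failed, matching the VMX MSR-load error reporting convention.
     */
    static uint32_t load_msr_area_sketch(const struct vmx_msr_entry_sketch *e, uint32_t count)
    {
            for (uint32_t i = 0; i < count; i++)
                    if (set_msr_sketch(e[i].index, e[i].value))
                            return i + 1;
            return 0;
    }
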