Lines Matching refs:vmx

876 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
877 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
1262 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) in __find_msr_index() argument
1266 for (i = 0; i < vmx->nmsrs; ++i) in __find_msr_index()
1267 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) in __find_msr_index()
1298 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) in find_msr_entry() argument
1302 i = __find_msr_index(vmx, msr); in find_msr_entry()
1304 return &vmx->guest_msrs[i]; in find_msr_entry()
1527 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_init() argument
1530 vmx->vm_entry_controls_shadow = val; in vm_entry_controls_init()
1533 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_set() argument
1535 if (vmx->vm_entry_controls_shadow != val) in vm_entry_controls_set()
1536 vm_entry_controls_init(vmx, val); in vm_entry_controls_set()
1539 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) in vm_entry_controls_get() argument
1541 return vmx->vm_entry_controls_shadow; in vm_entry_controls_get()
1545 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_setbit() argument
1547 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); in vm_entry_controls_setbit()
1550 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_clearbit() argument
1552 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); in vm_entry_controls_clearbit()
1555 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_init() argument
1558 vmx->vm_exit_controls_shadow = val; in vm_exit_controls_init()
1561 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_set() argument
1563 if (vmx->vm_exit_controls_shadow != val) in vm_exit_controls_set()
1564 vm_exit_controls_init(vmx, val); in vm_exit_controls_set()
1567 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) in vm_exit_controls_get() argument
1569 return vmx->vm_exit_controls_shadow; in vm_exit_controls_get()
1573 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_setbit() argument
1575 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); in vm_exit_controls_setbit()
1578 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_clearbit() argument
1580 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); in vm_exit_controls_clearbit()
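The vm_entry_controls_*() and vm_exit_controls_*() helpers above keep a shadow copy of the last value written to the corresponding VMCS control field, so the _set() variants can skip the VMWRITE when nothing changed, and setbit/clearbit are composed from get + set. Below is a minimal, self-contained sketch of that write-caching pattern, assuming a stubbed-out vmcs_write32(); none of it is the kernel's code, it only mirrors the shape of the helpers listed above.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the real VMWRITE; here it only logs and counts. */
static unsigned int vmwrite_count;

static void vmcs_write32_stub(const char *field, uint32_t val)
{
	vmwrite_count++;
	printf("VMWRITE %s = 0x%x\n", field, (unsigned)val);
}

struct ctl_shadow {
	uint32_t vm_entry_controls_shadow;	/* last value handed to hardware */
};

static void vm_entry_controls_init(struct ctl_shadow *s, uint32_t val)
{
	vmcs_write32_stub("VM_ENTRY_CONTROLS", val);
	s->vm_entry_controls_shadow = val;
}

static void vm_entry_controls_set(struct ctl_shadow *s, uint32_t val)
{
	if (s->vm_entry_controls_shadow != val)	/* skip redundant VMWRITEs */
		vm_entry_controls_init(s, val);
}

static uint32_t vm_entry_controls_get(struct ctl_shadow *s)
{
	return s->vm_entry_controls_shadow;
}

static void vm_entry_controls_setbit(struct ctl_shadow *s, uint32_t bit)
{
	vm_entry_controls_set(s, vm_entry_controls_get(s) | bit);
}

int main(void)
{
	struct ctl_shadow s;

	vm_entry_controls_init(&s, 0x00ff);
	vm_entry_controls_setbit(&s, 0x0001);	/* already set: no VMWRITE */
	vm_entry_controls_setbit(&s, 0x0100);	/* new bit: one more VMWRITE */
	printf("VMWRITEs issued: %u\n", vmwrite_count);	/* prints 2 */
	return 0;
}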
1583 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) in vmx_segment_cache_clear() argument
1585 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_clear()
1588 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, in vmx_segment_cache_test_set() argument
1594 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { in vmx_segment_cache_test_set()
1595 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
1596 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_test_set()
1598 ret = vmx->segment_cache.bitmask & mask; in vmx_segment_cache_test_set()
1599 vmx->segment_cache.bitmask |= mask; in vmx_segment_cache_test_set()
1603 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_selector() argument
1605 u16 *p = &vmx->segment_cache.seg[seg].selector; in vmx_read_guest_seg_selector()
1607 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) in vmx_read_guest_seg_selector()
1612 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_base() argument
1614 ulong *p = &vmx->segment_cache.seg[seg].base; in vmx_read_guest_seg_base()
1616 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) in vmx_read_guest_seg_base()
1621 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_limit() argument
1623 u32 *p = &vmx->segment_cache.seg[seg].limit; in vmx_read_guest_seg_limit()
1625 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) in vmx_read_guest_seg_limit()
1630 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_ar() argument
1632 u32 *p = &vmx->segment_cache.seg[seg].ar; in vmx_read_guest_seg_ar()
1634 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) in vmx_read_guest_seg_ar()
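vmx_segment_cache_clear() and vmx_segment_cache_test_set() maintain a per-vCPU cache of guest segment fields: the cache is invalidated by zeroing a validity bitmask, and test_set() reports whether a given field bit was already valid while marking it valid, so the vmx_read_guest_seg_*() helpers only fall back to a VMREAD on a miss. A rough standalone illustration of that test-and-set bitmask, reduced to a single segment; the field enum values and the vmcs_read_field() stub are assumptions for the sketch, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Field indices within one cached segment (values assumed for the sketch). */
enum { SEG_FIELD_SEL, SEG_FIELD_BASE, SEG_FIELD_LIMIT, SEG_FIELD_AR, SEG_FIELD_NR };

struct seg_cache {
	uint32_t bitmask;		/* one validity bit per cached field */
	uint64_t value[SEG_FIELD_NR];	/* cached field values */
};

/* Stand-in for a (comparatively slow) VMREAD of the live VMCS field. */
static uint64_t vmcs_read_field(int field)
{
	printf("VMREAD of field %d\n", field);
	return 0x1234u + field;
}

/* Return whether the field was already cached; mark it cached either way. */
static bool seg_cache_test_set(struct seg_cache *c, int field)
{
	uint32_t mask = 1u << field;
	bool ret = c->bitmask & mask;

	c->bitmask |= mask;
	return ret;
}

static uint64_t read_seg_field(struct seg_cache *c, int field)
{
	if (!seg_cache_test_set(c, field))	/* miss: refill from the VMCS */
		c->value[field] = vmcs_read_field(field);
	return c->value[field];			/* hit: no VMREAD needed */
}

int main(void)
{
	struct seg_cache c = { 0 };

	read_seg_field(&c, SEG_FIELD_BASE);	/* issues one VMREAD */
	read_seg_field(&c, SEG_FIELD_BASE);	/* served from the cache */
	return 0;
}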
1667 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, in clear_atomic_switch_msr_special() argument
1670 vm_entry_controls_clearbit(vmx, entry); in clear_atomic_switch_msr_special()
1671 vm_exit_controls_clearbit(vmx, exit); in clear_atomic_switch_msr_special()
1674 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) in clear_atomic_switch_msr() argument
1677 struct msr_autoload *m = &vmx->msr_autoload; in clear_atomic_switch_msr()
1682 clear_atomic_switch_msr_special(vmx, in clear_atomic_switch_msr()
1690 clear_atomic_switch_msr_special(vmx, in clear_atomic_switch_msr()
1711 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, in add_atomic_switch_msr_special() argument
1718 vm_entry_controls_setbit(vmx, entry); in add_atomic_switch_msr_special()
1719 vm_exit_controls_setbit(vmx, exit); in add_atomic_switch_msr_special()
1722 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, in add_atomic_switch_msr() argument
1726 struct msr_autoload *m = &vmx->msr_autoload; in add_atomic_switch_msr()
1731 add_atomic_switch_msr_special(vmx, in add_atomic_switch_msr()
1742 add_atomic_switch_msr_special(vmx, in add_atomic_switch_msr()
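clear_atomic_switch_msr() and add_atomic_switch_msr() manage the MSR autoload lists (vmx->msr_autoload) that the CPU switches atomically on VM entry/exit, while the *_special() variants instead toggle dedicated VM-entry/VM-exit control bits for MSRs such as EFER that have native load/save controls. The sketch below covers only the generic list bookkeeping in a self-contained form: update-or-append on add, swap-the-last-entry-in on clear. The array size, MSR numbers, and helper names are placeholders, not the kernel's msr_autoload layout.

#include <stdint.h>
#include <stdio.h>

#define AUTOLOAD_MAX 8	/* small fixed-size list, like the VMCS autoload area */

struct autoload_entry {
	uint32_t msr;
	uint64_t value;
};

struct autoload {
	unsigned int nr;
	struct autoload_entry ent[AUTOLOAD_MAX];
};

/* Add an MSR, or update it in place if it is already on the list. */
static int autoload_add(struct autoload *m, uint32_t msr, uint64_t value)
{
	unsigned int i;

	for (i = 0; i < m->nr; i++)
		if (m->ent[i].msr == msr)
			break;
	if (i == m->nr) {
		if (m->nr == AUTOLOAD_MAX)
			return -1;		/* no free slot left */
		m->nr++;
	}
	m->ent[i].msr = msr;
	m->ent[i].value = value;
	return 0;
}

/* Remove an MSR by swapping the last entry into its slot. */
static void autoload_clear(struct autoload *m, uint32_t msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; i++)
		if (m->ent[i].msr == msr)
			break;
	if (i == m->nr)
		return;				/* not on the list */
	m->nr--;
	m->ent[i] = m->ent[m->nr];		/* keep the array dense */
}

int main(void)
{
	struct autoload m = { 0 };

	autoload_add(&m, 0xc0000080, 0x500);	/* example MSR numbers only */
	autoload_add(&m, 0x00000da0, 0);
	autoload_clear(&m, 0xc0000080);
	printf("entries left: %u\n", m.nr);	/* prints 1 */
	return 0;
}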
1793 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) in update_transition_efer() argument
1795 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1821 clear_atomic_switch_msr(vmx, MSR_EFER); in update_transition_efer()
1829 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
1833 add_atomic_switch_msr(vmx, MSR_EFER, in update_transition_efer()
1840 vmx->guest_msrs[efer_offset].data = guest_efer; in update_transition_efer()
1841 vmx->guest_msrs[efer_offset].mask = ~ignore_bits; in update_transition_efer()
1885 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_save_host_state() local
1888 if (vmx->host_state.loaded) in vmx_save_host_state()
1891 vmx->host_state.loaded = 1; in vmx_save_host_state()
1896 vmx->host_state.ldt_sel = kvm_read_ldt(); in vmx_save_host_state()
1897 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; in vmx_save_host_state()
1898 savesegment(fs, vmx->host_state.fs_sel); in vmx_save_host_state()
1899 if (!(vmx->host_state.fs_sel & 7)) { in vmx_save_host_state()
1900 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); in vmx_save_host_state()
1901 vmx->host_state.fs_reload_needed = 0; in vmx_save_host_state()
1904 vmx->host_state.fs_reload_needed = 1; in vmx_save_host_state()
1906 savesegment(gs, vmx->host_state.gs_sel); in vmx_save_host_state()
1907 if (!(vmx->host_state.gs_sel & 7)) in vmx_save_host_state()
1908 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); in vmx_save_host_state()
1911 vmx->host_state.gs_ldt_reload_needed = 1; in vmx_save_host_state()
1915 savesegment(ds, vmx->host_state.ds_sel); in vmx_save_host_state()
1916 savesegment(es, vmx->host_state.es_sel); in vmx_save_host_state()
1923 vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); in vmx_save_host_state()
1924 vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); in vmx_save_host_state()
1928 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); in vmx_save_host_state()
1929 if (is_long_mode(&vmx->vcpu)) in vmx_save_host_state()
1930 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_save_host_state()
1933 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); in vmx_save_host_state()
1934 for (i = 0; i < vmx->save_nmsrs; ++i) in vmx_save_host_state()
1935 kvm_set_shared_msr(vmx->guest_msrs[i].index, in vmx_save_host_state()
1936 vmx->guest_msrs[i].data, in vmx_save_host_state()
1937 vmx->guest_msrs[i].mask); in vmx_save_host_state()
1940 static void __vmx_load_host_state(struct vcpu_vmx *vmx) in __vmx_load_host_state() argument
1942 if (!vmx->host_state.loaded) in __vmx_load_host_state()
1945 ++vmx->vcpu.stat.host_state_reload; in __vmx_load_host_state()
1946 vmx->host_state.loaded = 0; in __vmx_load_host_state()
1948 if (is_long_mode(&vmx->vcpu)) in __vmx_load_host_state()
1949 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in __vmx_load_host_state()
1951 if (vmx->host_state.gs_ldt_reload_needed) { in __vmx_load_host_state()
1952 kvm_load_ldt(vmx->host_state.ldt_sel); in __vmx_load_host_state()
1954 load_gs_index(vmx->host_state.gs_sel); in __vmx_load_host_state()
1956 loadsegment(gs, vmx->host_state.gs_sel); in __vmx_load_host_state()
1959 if (vmx->host_state.fs_reload_needed) in __vmx_load_host_state()
1960 loadsegment(fs, vmx->host_state.fs_sel); in __vmx_load_host_state()
1962 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) { in __vmx_load_host_state()
1963 loadsegment(ds, vmx->host_state.ds_sel); in __vmx_load_host_state()
1964 loadsegment(es, vmx->host_state.es_sel); in __vmx_load_host_state()
1969 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); in __vmx_load_host_state()
1971 if (vmx->host_state.msr_host_bndcfgs) in __vmx_load_host_state()
1972 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs); in __vmx_load_host_state()
1977 if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded) in __vmx_load_host_state()
1982 static void vmx_load_host_state(struct vcpu_vmx *vmx) in vmx_load_host_state() argument
1985 __vmx_load_host_state(vmx); in vmx_load_host_state()
2041 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load() local
2046 else if (vmx->loaded_vmcs->cpu != cpu) in vmx_vcpu_load()
2047 loaded_vmcs_clear(vmx->loaded_vmcs); in vmx_vcpu_load()
2049 if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { in vmx_vcpu_load()
2050 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; in vmx_vcpu_load()
2051 vmcs_load(vmx->loaded_vmcs->vmcs); in vmx_vcpu_load()
2054 if (vmx->loaded_vmcs->cpu != cpu) { in vmx_vcpu_load()
2069 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, in vmx_vcpu_load()
2084 vmx->loaded_vmcs->cpu = cpu; in vmx_vcpu_load()
2089 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) { in vmx_vcpu_load()
2090 vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio; in vmx_vcpu_load()
2091 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); in vmx_vcpu_load()
2276 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception() local
2288 if (vmx->rmode.vm86_active) { in vmx_queue_exception()
2299 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
2320 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) in move_msr_up() argument
2324 tmp = vmx->guest_msrs[to]; in move_msr_up()
2325 vmx->guest_msrs[to] = vmx->guest_msrs[from]; in move_msr_up()
2326 vmx->guest_msrs[from] = tmp; in move_msr_up()
2355 static void setup_msrs(struct vcpu_vmx *vmx) in setup_msrs() argument
2361 if (is_long_mode(&vmx->vcpu)) { in setup_msrs()
2362 index = __find_msr_index(vmx, MSR_SYSCALL_MASK); in setup_msrs()
2364 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2365 index = __find_msr_index(vmx, MSR_LSTAR); in setup_msrs()
2367 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2368 index = __find_msr_index(vmx, MSR_CSTAR); in setup_msrs()
2370 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2371 index = __find_msr_index(vmx, MSR_TSC_AUX); in setup_msrs()
2372 if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu)) in setup_msrs()
2373 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2378 index = __find_msr_index(vmx, MSR_STAR); in setup_msrs()
2379 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) in setup_msrs()
2380 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2383 index = __find_msr_index(vmx, MSR_EFER); in setup_msrs()
2384 if (index >= 0 && update_transition_efer(vmx, index)) in setup_msrs()
2385 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
2387 vmx->save_nmsrs = save_nmsrs; in setup_msrs()
2390 vmx_set_msr_bitmap(&vmx->vcpu); in setup_msrs()
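setup_msrs() partitions vmx->guest_msrs[] so that the entries which actually need the shared-MSR save/restore path in the current guest mode end up compacted at the front: each candidate located by __find_msr_index() is swapped into the next free slot by move_msr_up(), and vmx->save_nmsrs records how many are active. A compact sketch of that compaction-by-swap follows; the made-up "wanted" predicate stands in for the long-mode/EFER_SCE/RDTSCP checks shown above.

#include <stdbool.h>
#include <stdio.h>

struct msr_entry {
	int index;
	bool wanted;	/* stand-in for "needs save/restore in this mode" */
};

static void move_msr_up(struct msr_entry *msrs, int from, int to)
{
	struct msr_entry tmp = msrs[to];

	msrs[to] = msrs[from];
	msrs[from] = tmp;
}

/* Swap every wanted entry into the next free slot at the front. */
static int setup_msrs(struct msr_entry *msrs, int nmsrs)
{
	int save_nmsrs = 0;

	for (int i = 0; i < nmsrs; i++)
		if (msrs[i].wanted)
			move_msr_up(msrs, i, save_nmsrs++);
	return save_nmsrs;	/* entries [0, save_nmsrs) are the active set */
}

int main(void)
{
	struct msr_entry msrs[] = {
		{ 0, false }, { 1, true }, { 2, false }, { 3, true },
	};
	int active = setup_msrs(msrs, 4);

	printf("active MSRs: %d, first index: %d\n", active, msrs[0].index);
	return 0;
}

Keeping the active entries dense at the front is what lets the save/restore loops (such as the kvm_set_shared_msr() loop in vmx_save_host_state() above) simply iterate from 0 to save_nmsrs.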
2492 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) in nested_vmx_setup_ctls_msrs() argument
2511 vmx->nested.nested_vmx_pinbased_ctls_low, in nested_vmx_setup_ctls_msrs()
2512 vmx->nested.nested_vmx_pinbased_ctls_high); in nested_vmx_setup_ctls_msrs()
2513 vmx->nested.nested_vmx_pinbased_ctls_low |= in nested_vmx_setup_ctls_msrs()
2515 vmx->nested.nested_vmx_pinbased_ctls_high &= in nested_vmx_setup_ctls_msrs()
2519 vmx->nested.nested_vmx_pinbased_ctls_high |= in nested_vmx_setup_ctls_msrs()
2522 if (vmx_cpu_uses_apicv(&vmx->vcpu)) in nested_vmx_setup_ctls_msrs()
2523 vmx->nested.nested_vmx_pinbased_ctls_high |= in nested_vmx_setup_ctls_msrs()
2528 vmx->nested.nested_vmx_exit_ctls_low, in nested_vmx_setup_ctls_msrs()
2529 vmx->nested.nested_vmx_exit_ctls_high); in nested_vmx_setup_ctls_msrs()
2530 vmx->nested.nested_vmx_exit_ctls_low = in nested_vmx_setup_ctls_msrs()
2533 vmx->nested.nested_vmx_exit_ctls_high &= in nested_vmx_setup_ctls_msrs()
2538 vmx->nested.nested_vmx_exit_ctls_high |= in nested_vmx_setup_ctls_msrs()
2544 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; in nested_vmx_setup_ctls_msrs()
2547 vmx->nested.nested_vmx_true_exit_ctls_low = in nested_vmx_setup_ctls_msrs()
2548 vmx->nested.nested_vmx_exit_ctls_low & in nested_vmx_setup_ctls_msrs()
2553 vmx->nested.nested_vmx_entry_ctls_low, in nested_vmx_setup_ctls_msrs()
2554 vmx->nested.nested_vmx_entry_ctls_high); in nested_vmx_setup_ctls_msrs()
2555 vmx->nested.nested_vmx_entry_ctls_low = in nested_vmx_setup_ctls_msrs()
2557 vmx->nested.nested_vmx_entry_ctls_high &= in nested_vmx_setup_ctls_msrs()
2562 vmx->nested.nested_vmx_entry_ctls_high |= in nested_vmx_setup_ctls_msrs()
2565 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; in nested_vmx_setup_ctls_msrs()
2568 vmx->nested.nested_vmx_true_entry_ctls_low = in nested_vmx_setup_ctls_msrs()
2569 vmx->nested.nested_vmx_entry_ctls_low & in nested_vmx_setup_ctls_msrs()
2574 vmx->nested.nested_vmx_procbased_ctls_low, in nested_vmx_setup_ctls_msrs()
2575 vmx->nested.nested_vmx_procbased_ctls_high); in nested_vmx_setup_ctls_msrs()
2576 vmx->nested.nested_vmx_procbased_ctls_low = in nested_vmx_setup_ctls_msrs()
2578 vmx->nested.nested_vmx_procbased_ctls_high &= in nested_vmx_setup_ctls_msrs()
2598 vmx->nested.nested_vmx_procbased_ctls_high |= in nested_vmx_setup_ctls_msrs()
2603 vmx->nested.nested_vmx_true_procbased_ctls_low = in nested_vmx_setup_ctls_msrs()
2604 vmx->nested.nested_vmx_procbased_ctls_low & in nested_vmx_setup_ctls_msrs()
2609 vmx->nested.nested_vmx_secondary_ctls_low, in nested_vmx_setup_ctls_msrs()
2610 vmx->nested.nested_vmx_secondary_ctls_high); in nested_vmx_setup_ctls_msrs()
2611 vmx->nested.nested_vmx_secondary_ctls_low = 0; in nested_vmx_setup_ctls_msrs()
2612 vmx->nested.nested_vmx_secondary_ctls_high &= in nested_vmx_setup_ctls_msrs()
2625 vmx->nested.nested_vmx_secondary_ctls_high |= in nested_vmx_setup_ctls_msrs()
2627 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | in nested_vmx_setup_ctls_msrs()
2630 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept; in nested_vmx_setup_ctls_msrs()
2636 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT; in nested_vmx_setup_ctls_msrs()
2638 vmx->nested.nested_vmx_ept_caps = 0; in nested_vmx_setup_ctls_msrs()
2647 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | in nested_vmx_setup_ctls_msrs()
2651 vmx->nested.nested_vmx_vpid_caps = 0; in nested_vmx_setup_ctls_msrs()
2654 vmx->nested.nested_vmx_secondary_ctls_high |= in nested_vmx_setup_ctls_msrs()
2659 vmx->nested.nested_vmx_misc_low, in nested_vmx_setup_ctls_msrs()
2660 vmx->nested.nested_vmx_misc_high); in nested_vmx_setup_ctls_msrs()
2661 vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA; in nested_vmx_setup_ctls_msrs()
2662 vmx->nested.nested_vmx_misc_low |= in nested_vmx_setup_ctls_msrs()
2665 vmx->nested.nested_vmx_misc_high = 0; in nested_vmx_setup_ctls_msrs()
2684 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_vmx_msr() local
2701 vmx->nested.nested_vmx_pinbased_ctls_low, in vmx_get_vmx_msr()
2702 vmx->nested.nested_vmx_pinbased_ctls_high); in vmx_get_vmx_msr()
2706 vmx->nested.nested_vmx_true_procbased_ctls_low, in vmx_get_vmx_msr()
2707 vmx->nested.nested_vmx_procbased_ctls_high); in vmx_get_vmx_msr()
2711 vmx->nested.nested_vmx_procbased_ctls_low, in vmx_get_vmx_msr()
2712 vmx->nested.nested_vmx_procbased_ctls_high); in vmx_get_vmx_msr()
2716 vmx->nested.nested_vmx_true_exit_ctls_low, in vmx_get_vmx_msr()
2717 vmx->nested.nested_vmx_exit_ctls_high); in vmx_get_vmx_msr()
2721 vmx->nested.nested_vmx_exit_ctls_low, in vmx_get_vmx_msr()
2722 vmx->nested.nested_vmx_exit_ctls_high); in vmx_get_vmx_msr()
2726 vmx->nested.nested_vmx_true_entry_ctls_low, in vmx_get_vmx_msr()
2727 vmx->nested.nested_vmx_entry_ctls_high); in vmx_get_vmx_msr()
2731 vmx->nested.nested_vmx_entry_ctls_low, in vmx_get_vmx_msr()
2732 vmx->nested.nested_vmx_entry_ctls_high); in vmx_get_vmx_msr()
2736 vmx->nested.nested_vmx_misc_low, in vmx_get_vmx_msr()
2737 vmx->nested.nested_vmx_misc_high); in vmx_get_vmx_msr()
2763 vmx->nested.nested_vmx_secondary_ctls_low, in vmx_get_vmx_msr()
2764 vmx->nested.nested_vmx_secondary_ctls_high); in vmx_get_vmx_msr()
2768 *pdata = vmx->nested.nested_vmx_ept_caps | in vmx_get_vmx_msr()
2769 ((u64)vmx->nested.nested_vmx_vpid_caps << 32); in vmx_get_vmx_msr()
2858 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr() local
2870 vmx_segment_cache_clear(vmx); in vmx_set_msr()
2874 vmx_segment_cache_clear(vmx); in vmx_set_msr()
2878 vmx_load_host_state(vmx); in vmx_set_msr()
2879 vmx->msr_guest_kernel_gs_base = data; in vmx_set_msr()
2917 vmx->nested.msr_ia32_feature_control = data; in vmx_set_msr()
2934 add_atomic_switch_msr(vmx, MSR_IA32_XSS, in vmx_set_msr()
2937 clear_atomic_switch_msr(vmx, MSR_IA32_XSS); in vmx_set_msr()
2947 msr = find_msr_entry(vmx, msr_index); in vmx_set_msr()
2951 if (msr - vmx->guest_msrs < vmx->save_nmsrs) { in vmx_set_msr()
3436 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode() local
3442 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
3443 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
3444 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
3445 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
3446 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
3447 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
3449 vmx->rmode.vm86_active = 0; in enter_pmode()
3451 vmx_segment_cache_clear(vmx); in enter_pmode()
3453 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
3457 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; in enter_pmode()
3465 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
3466 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
3467 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_pmode()
3468 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_pmode()
3469 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_pmode()
3470 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_pmode()
3509 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode() local
3511 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_rmode()
3512 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_rmode()
3513 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_rmode()
3514 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_rmode()
3515 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_rmode()
3516 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_rmode()
3517 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_rmode()
3519 vmx->rmode.vm86_active = 1; in enter_rmode()
3529 vmx_segment_cache_clear(vmx); in enter_rmode()
3536 vmx->rmode.save_rflags = flags; in enter_rmode()
3544 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_rmode()
3545 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_rmode()
3546 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_rmode()
3547 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_rmode()
3548 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_rmode()
3549 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_rmode()
3556 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer() local
3557 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); in vmx_set_efer()
3576 setup_msrs(vmx); in vmx_set_efer()
3709 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0() local
3718 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) in vmx_set_cr0()
3721 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) in vmx_set_cr0()
3745 vmx->emulation_required = emulation_required(vcpu); in vmx_set_cr0()
3837 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment() local
3840 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_get_segment()
3841 *var = vmx->rmode.segs[seg]; in vmx_get_segment()
3843 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) in vmx_get_segment()
3845 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3846 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3849 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3850 var->limit = vmx_read_guest_seg_limit(vmx, seg); in vmx_get_segment()
3851 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3852 ar = vmx_read_guest_seg_ar(vmx, seg); in vmx_get_segment()
3884 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl() local
3886 if (unlikely(vmx->rmode.vm86_active)) in vmx_get_cpl()
3889 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); in vmx_get_cpl()
3917 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment() local
3920 vmx_segment_cache_clear(vmx); in vmx_set_segment()
3922 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_set_segment()
3923 vmx->rmode.segs[seg] = *var; in vmx_set_segment()
3927 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); in vmx_set_segment()
3952 vmx->emulation_required = emulation_required(vcpu); in vmx_set_segment()
4500 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt() local
4505 if (vmx->nested.pi_desc && in vmx_complete_nested_posted_interrupt()
4506 vmx->nested.pi_pending) { in vmx_complete_nested_posted_interrupt()
4507 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
4508 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
4512 (unsigned long *)vmx->nested.pi_desc->pir, 256); in vmx_complete_nested_posted_interrupt()
4517 vapic_page = kmap(vmx->nested.virtual_apic_page); in vmx_complete_nested_posted_interrupt()
4522 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page); in vmx_complete_nested_posted_interrupt()
4523 kunmap(vmx->nested.virtual_apic_page); in vmx_complete_nested_posted_interrupt()
4539 struct vcpu_vmx *vmx = to_vmx(vcpu); in kvm_vcpu_trigger_posted_interrupt() local
4553 WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc)); in kvm_vcpu_trigger_posted_interrupt()
4566 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt() local
4569 vector == vmx->nested.posted_intr_nv) { in vmx_deliver_nested_posted_interrupt()
4576 vmx->nested.pi_pending = true; in vmx_deliver_nested_posted_interrupt()
4591 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt() local
4598 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) in vmx_deliver_posted_interrupt()
4601 r = pi_test_and_set_on(&vmx->pi_desc); in vmx_deliver_posted_interrupt()
4609 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr() local
4611 if (!pi_test_and_clear_on(&vmx->pi_desc)) in vmx_sync_pir_to_irr()
4614 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir); in vmx_sync_pir_to_irr()
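The posted-interrupt helpers operate on a descriptor holding a 256-bit Posted Interrupt Request (PIR) bitmap plus an outstanding-notification (ON) flag: vmx_deliver_posted_interrupt() sets a PIR bit and ON before possibly sending a notification IPI, and vmx_sync_pir_to_irr() atomically clears ON and folds PIR into the virtual APIC's IRR. Here is a simplified user-space sketch of that handshake using C11 atomics; the struct layout and helper names are illustrative, not the hardware descriptor format.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pi_desc {
	_Atomic uint64_t pir[4];	/* 256 posted-interrupt request bits */
	_Atomic uint32_t on;		/* outstanding-notification flag */
};

/* Sender side: post a vector and flag that something is pending. */
static bool pi_post(struct pi_desc *pi, uint8_t vector)
{
	atomic_fetch_or(&pi->pir[vector / 64], 1ull << (vector % 64));
	return atomic_exchange(&pi->on, 1) == 0;	/* true: send a notification */
}

/* Receiver side: if ON was set, clear it and fold PIR into the IRR. */
static void pi_sync_to_irr(struct pi_desc *pi, uint64_t irr[4])
{
	if (atomic_exchange(&pi->on, 0) == 0)
		return;					/* nothing pending */
	for (int i = 0; i < 4; i++)
		irr[i] |= atomic_exchange(&pi->pir[i], 0);
}

int main(void)
{
	struct pi_desc pi = { 0 };
	uint64_t irr[4] = { 0 };

	if (pi_post(&pi, 0x31))
		printf("would send the notification IPI\n");
	pi_sync_to_irr(&pi, irr);
	printf("IRR[0] = 0x%llx\n", (unsigned long long)irr[0]);
	return 0;
}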
4628 static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) in vmx_set_constant_host_state() argument
4641 vmx->host_state.vmcs_host_cr4 = cr4; in vmx_set_constant_host_state()
4661 vmx->host_idt_base = dt.address; in vmx_set_constant_host_state()
4676 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) in set_cr4_guest_host_mask() argument
4678 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; in set_cr4_guest_host_mask()
4680 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; in set_cr4_guest_host_mask()
4681 if (is_guest_mode(&vmx->vcpu)) in set_cr4_guest_host_mask()
4682 vmx->vcpu.arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
4683 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; in set_cr4_guest_host_mask()
4684 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
4687 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) in vmx_pin_based_exec_ctrl() argument
4691 if (!vmx_cpu_uses_apicv(&vmx->vcpu)) in vmx_pin_based_exec_ctrl()
4696 static u32 vmx_exec_control(struct vcpu_vmx *vmx) in vmx_exec_control() argument
4700 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4703 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { in vmx_exec_control()
4717 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx) in vmx_secondary_exec_control() argument
4720 if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu)) in vmx_secondary_exec_control()
4722 if (vmx->vpid == 0) in vmx_secondary_exec_control()
4734 if (!vmx_cpu_uses_apicv(&vmx->vcpu)) in vmx_secondary_exec_control()
4769 static int vmx_vcpu_setup(struct vcpu_vmx *vmx) in vmx_vcpu_setup() argument
4790 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); in vmx_vcpu_setup()
4792 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); in vmx_vcpu_setup()
4796 vmx_secondary_exec_control(vmx)); in vmx_vcpu_setup()
4798 if (vmx_cpu_uses_apicv(&vmx->vcpu)) { in vmx_vcpu_setup()
4807 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); in vmx_vcpu_setup()
4812 vmx->ple_window = ple_window; in vmx_vcpu_setup()
4813 vmx->ple_window_dirty = true; in vmx_vcpu_setup()
4822 vmx_set_constant_host_state(vmx); in vmx_vcpu_setup()
4835 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); in vmx_vcpu_setup()
4837 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); in vmx_vcpu_setup()
4840 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in vmx_vcpu_setup()
4845 int j = vmx->nmsrs; in vmx_vcpu_setup()
4851 vmx->guest_msrs[j].index = i; in vmx_vcpu_setup()
4852 vmx->guest_msrs[j].data = 0; in vmx_vcpu_setup()
4853 vmx->guest_msrs[j].mask = -1ull; in vmx_vcpu_setup()
4854 ++vmx->nmsrs; in vmx_vcpu_setup()
4858 vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); in vmx_vcpu_setup()
4861 vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); in vmx_vcpu_setup()
4864 set_cr4_guest_host_mask(vmx); in vmx_vcpu_setup()
4874 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset() local
4878 vmx->rmode.vm86_active = 0; in vmx_vcpu_reset()
4880 vmx->soft_vnmi_blocked = 0; in vmx_vcpu_reset()
4882 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
4894 vmx_segment_cache_clear(vmx); in vmx_vcpu_reset()
4936 setup_msrs(vmx); in vmx_vcpu_reset()
4951 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); in vmx_vcpu_reset()
4953 if (vmx->vpid != 0) in vmx_vcpu_reset()
4954 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in vmx_vcpu_reset()
4957 vmx->vcpu.arch.cr0 = cr0; in vmx_vcpu_reset()
4964 vpid_sync_context(vmx->vpid); in vmx_vcpu_reset()
5019 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq() local
5026 if (vmx->rmode.vm86_active) { in vmx_inject_irq()
5038 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
5046 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi() local
5060 vmx->soft_vnmi_blocked = 1; in vmx_inject_nmi()
5061 vmx->vnmi_blocked_time = 0; in vmx_inject_nmi()
5065 vmx->nmi_known_unmasked = false; in vmx_inject_nmi()
5066 if (vmx->rmode.vm86_active) { in vmx_inject_nmi()
5086 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask() local
5089 if (vmx->soft_vnmi_blocked != masked) { in vmx_set_nmi_mask()
5090 vmx->soft_vnmi_blocked = masked; in vmx_set_nmi_mask()
5091 vmx->vnmi_blocked_time = 0; in vmx_set_nmi_mask()
5094 vmx->nmi_known_unmasked = !masked; in vmx_set_nmi_mask()
5223 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception() local
5230 vect_info = vmx->idt_vectoring_info; in handle_exception()
5231 intr_info = vmx->exit_intr_info; in handle_exception()
5288 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) in handle_exception()
5316 vmx->vcpu.arch.event_exit_inst_len = in handle_exception()
5814 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch() local
5821 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); in handle_task_switch()
5822 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); in handle_task_switch()
5823 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); in handle_task_switch()
5839 if (vmx->idt_vectoring_info & in handle_task_switch()
5973 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state() local
5983 while (vmx->emulation_required && count-- != 0) { in handle_invalid_guest_state()
5985 return handle_interrupt_window(&vmx->vcpu); in handle_invalid_guest_state()
6051 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window() local
6052 int old = vmx->ple_window; in grow_ple_window()
6054 vmx->ple_window = __grow_ple_window(old); in grow_ple_window()
6056 if (vmx->ple_window != old) in grow_ple_window()
6057 vmx->ple_window_dirty = true; in grow_ple_window()
6059 trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); in grow_ple_window()
6064 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window() local
6065 int old = vmx->ple_window; in shrink_ple_window()
6067 vmx->ple_window = __shrink_ple_window(old, in shrink_ple_window()
6070 if (vmx->ple_window != old) in shrink_ple_window()
6071 vmx->ple_window_dirty = true; in shrink_ple_window()
6073 trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); in shrink_ple_window()
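grow_ple_window() and shrink_ple_window() only adjust the cached vmx->ple_window and raise ple_window_dirty; the VMWRITE of the PLE_WINDOW field is deferred until the next vmx_vcpu_run() (see the ple_window_dirty lines from that function further down). A small sketch of that dirty-flag deferral follows; the doubling-and-clamping policy is an assumption standing in for the kernel's __grow_ple_window() parameters, which are not part of this listing.

#include <stdbool.h>
#include <stdio.h>

struct ple_state {
	unsigned int ple_window;
	bool ple_window_dirty;		/* VMCS write deferred to next vcpu run */
};

static void grow_ple_window(struct ple_state *s, unsigned int factor,
			    unsigned int max)
{
	unsigned int old = s->ple_window;

	s->ple_window = old * factor > max ? max : old * factor;
	if (s->ple_window != old)
		s->ple_window_dirty = true;
}

/* Called on the entry path: flush the cached value only if it changed. */
static void flush_ple_window(struct ple_state *s)
{
	if (!s->ple_window_dirty)
		return;
	s->ple_window_dirty = false;
	printf("VMWRITE PLE_WINDOW = %u\n", s->ple_window);
}

int main(void)
{
	struct ple_state s = { .ple_window = 4096, .ple_window_dirty = false };

	grow_ple_window(&s, 2, 65536);	/* a PAUSE-loop exit was observed */
	flush_ple_window(&s);		/* one VMWRITE on the next entry */
	flush_ple_window(&s);		/* no further VMWRITE */
	return 0;
}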
6397 static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx) in nested_get_current_vmcs02() argument
6400 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) in nested_get_current_vmcs02()
6401 if (item->vmptr == vmx->nested.current_vmptr) { in nested_get_current_vmcs02()
6402 list_move(&item->list, &vmx->nested.vmcs02_pool); in nested_get_current_vmcs02()
6406 if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) { in nested_get_current_vmcs02()
6408 item = list_entry(vmx->nested.vmcs02_pool.prev, in nested_get_current_vmcs02()
6410 item->vmptr = vmx->nested.current_vmptr; in nested_get_current_vmcs02()
6411 list_move(&item->list, &vmx->nested.vmcs02_pool); in nested_get_current_vmcs02()
6425 item->vmptr = vmx->nested.current_vmptr; in nested_get_current_vmcs02()
6426 list_add(&(item->list), &(vmx->nested.vmcs02_pool)); in nested_get_current_vmcs02()
6427 vmx->nested.vmcs02_num++; in nested_get_current_vmcs02()
6432 static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) in nested_free_vmcs02() argument
6435 list_for_each_entry(item, &vmx->nested.vmcs02_pool, list) in nested_free_vmcs02()
6440 vmx->nested.vmcs02_num--; in nested_free_vmcs02()
6450 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) in nested_free_all_saved_vmcss() argument
6454 WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); in nested_free_all_saved_vmcss()
6455 list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { in nested_free_all_saved_vmcss()
6460 if (vmx->loaded_vmcs == &item->vmcs02) in nested_free_all_saved_vmcss()
6466 vmx->nested.vmcs02_num--; in nested_free_all_saved_vmcss()
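nested_get_current_vmcs02() keeps a bounded pool of vmcs02 structures keyed by the current L1 vmptr: a hit is moved to the head of the list, a miss either recycles the tail (least recently used) entry once the pool is full or allocates a fresh one, and nested_free_vmcs02()/nested_free_all_saved_vmcss() drop entries again. Below is a toy move-to-front pool with the same shape, using a fixed array instead of the kernel's list_head; the pool size, fields, and payload are assumptions for the sketch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POOL_SIZE 2	/* stand-in for the real pool limit */

struct pool_item {
	uint64_t vmptr;		/* key: guest-physical address of the L1 VMCS */
	int payload;		/* stand-in for the cached vmcs02 state */
};

struct pool {
	int num;
	struct pool_item item[POOL_SIZE];	/* item[0] is most recently used */
};

/* Move slot i to the front, shifting the entries above it down by one. */
static void move_to_front(struct pool *p, int i)
{
	struct pool_item tmp = p->item[i];

	memmove(&p->item[1], &p->item[0], i * sizeof(tmp));
	p->item[0] = tmp;
}

static struct pool_item *pool_get(struct pool *p, uint64_t vmptr)
{
	for (int i = 0; i < p->num; i++)
		if (p->item[i].vmptr == vmptr) {
			move_to_front(p, i);		/* cache hit */
			return &p->item[0];
		}

	if (p->num == POOL_SIZE) {
		move_to_front(p, p->num - 1);		/* recycle the LRU slot */
	} else {
		move_to_front(p, p->num);		/* claim a fresh slot */
		p->num++;
	}
	p->item[0].vmptr = vmptr;
	p->item[0].payload = 0;
	return &p->item[0];
}

int main(void)
{
	struct pool p = { 0 };

	pool_get(&p, 0x1000);
	pool_get(&p, 0x2000);
	pool_get(&p, 0x1000);			/* hit: moved back to the front */
	pool_get(&p, 0x3000);			/* pool full: evicts 0x2000 */
	printf("MRU vmptr = 0x%llx, entries = %d\n",
	       (unsigned long long)p.item[0].vmptr, p.num);
	return 0;
}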
6521 struct vcpu_vmx *vmx = in vmx_preemption_timer_fn() local
6524 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
6525 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
6526 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
6643 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmptr() local
6683 vmx->nested.vmxon_ptr = vmptr; in nested_vmx_check_vmptr()
6693 if (vmptr == vmx->nested.vmxon_ptr) { in nested_vmx_check_vmptr()
6708 if (vmptr == vmx->nested.vmxon_ptr) { in nested_vmx_check_vmptr()
6735 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon() local
6766 if (vmx->nested.vmxon) { in handle_vmon()
6772 if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES) in handle_vmon()
6786 vmx->nested.current_shadow_vmcs = shadow_vmcs; in handle_vmon()
6789 INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool)); in handle_vmon()
6790 vmx->nested.vmcs02_num = 0; in handle_vmon()
6792 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, in handle_vmon()
6794 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; in handle_vmon()
6796 vmx->nested.vmxon = true; in handle_vmon()
6811 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_permission() local
6813 if (!vmx->nested.vmxon) { in nested_vmx_check_permission()
6833 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) in nested_release_vmcs12() argument
6835 if (vmx->nested.current_vmptr == -1ull) in nested_release_vmcs12()
6839 if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) in nested_release_vmcs12()
6845 copy_shadow_to_vmcs12(vmx); in nested_release_vmcs12()
6846 vmx->nested.sync_shadow_vmcs = false; in nested_release_vmcs12()
6851 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
6852 kunmap(vmx->nested.current_vmcs12_page); in nested_release_vmcs12()
6853 nested_release_page(vmx->nested.current_vmcs12_page); in nested_release_vmcs12()
6854 vmx->nested.current_vmptr = -1ull; in nested_release_vmcs12()
6855 vmx->nested.current_vmcs12 = NULL; in nested_release_vmcs12()
6862 static void free_nested(struct vcpu_vmx *vmx) in free_nested() argument
6864 if (!vmx->nested.vmxon) in free_nested()
6867 vmx->nested.vmxon = false; in free_nested()
6868 free_vpid(vmx->nested.vpid02); in free_nested()
6869 nested_release_vmcs12(vmx); in free_nested()
6871 free_vmcs(vmx->nested.current_shadow_vmcs); in free_nested()
6873 if (vmx->nested.apic_access_page) { in free_nested()
6874 nested_release_page(vmx->nested.apic_access_page); in free_nested()
6875 vmx->nested.apic_access_page = NULL; in free_nested()
6877 if (vmx->nested.virtual_apic_page) { in free_nested()
6878 nested_release_page(vmx->nested.virtual_apic_page); in free_nested()
6879 vmx->nested.virtual_apic_page = NULL; in free_nested()
6881 if (vmx->nested.pi_desc_page) { in free_nested()
6882 kunmap(vmx->nested.pi_desc_page); in free_nested()
6883 nested_release_page(vmx->nested.pi_desc_page); in free_nested()
6884 vmx->nested.pi_desc_page = NULL; in free_nested()
6885 vmx->nested.pi_desc = NULL; in free_nested()
6888 nested_free_all_saved_vmcss(vmx); in free_nested()
6905 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear() local
6916 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
6917 nested_release_vmcs12(vmx); in handle_vmclear()
6936 nested_free_vmcs02(vmx, vmptr); in handle_vmclear()
7042 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) in copy_shadow_to_vmcs12() argument
7047 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; in copy_shadow_to_vmcs12()
7074 vmcs12_write_any(&vmx->vcpu, field, field_value); in copy_shadow_to_vmcs12()
7078 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_shadow_to_vmcs12()
7083 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) in copy_vmcs12_to_shadow() argument
7096 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; in copy_vmcs12_to_shadow()
7103 vmcs12_read_any(&vmx->vcpu, field, &field_value); in copy_vmcs12_to_shadow()
7126 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_vmcs12_to_shadow()
7135 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs12() local
7136 if (vmx->nested.current_vmptr == -1ull) { in nested_vmx_check_vmcs12()
7243 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld() local
7252 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
7271 nested_release_vmcs12(vmx); in handle_vmptrld()
7272 vmx->nested.current_vmptr = vmptr; in handle_vmptrld()
7273 vmx->nested.current_vmcs12 = new_vmcs12; in handle_vmptrld()
7274 vmx->nested.current_vmcs12_page = page; in handle_vmptrld()
7279 __pa(vmx->nested.current_shadow_vmcs)); in handle_vmptrld()
7280 vmx->nested.sync_shadow_vmcs = true; in handle_vmptrld()
7318 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept() local
7327 if (!(vmx->nested.nested_vmx_secondary_ctls_high & in handle_invept()
7329 !(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
7345 types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
7384 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invvpid() local
7391 if (!(vmx->nested.nested_vmx_secondary_ctls_high & in handle_invvpid()
7393 !(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
7404 types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7; in handle_invvpid()
7703 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_exit_handled() local
7705 u32 exit_reason = vmx->exit_reason; in nested_vmx_exit_handled()
7709 vmx->idt_vectoring_info, in nested_vmx_exit_handled()
7714 if (vmx->nested.nested_run_pending) in nested_vmx_exit_handled()
7717 if (unlikely(vmx->fail)) { in nested_vmx_exit_handled()
7842 static int vmx_create_pml_buffer(struct vcpu_vmx *vmx) in vmx_create_pml_buffer() argument
7850 vmx->pml_pg = pml_pg; in vmx_create_pml_buffer()
7852 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in vmx_create_pml_buffer()
7858 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) in vmx_destroy_pml_buffer() argument
7860 if (vmx->pml_pg) { in vmx_destroy_pml_buffer()
7861 __free_page(vmx->pml_pg); in vmx_destroy_pml_buffer()
7862 vmx->pml_pg = NULL; in vmx_destroy_pml_buffer()
7868 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer() local
7884 pml_buf = page_address(vmx->pml_pg); in vmx_flush_pml_buffer()
8076 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit() local
8077 u32 exit_reason = vmx->exit_reason; in vmx_handle_exit()
8078 u32 vectoring_info = vmx->idt_vectoring_info; in vmx_handle_exit()
8093 if (vmx->emulation_required) in vmx_handle_exit()
8111 if (unlikely(vmx->fail)) { in vmx_handle_exit()
8137 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && in vmx_handle_exit()
8141 vmx->soft_vnmi_blocked = 0; in vmx_handle_exit()
8142 } else if (vmx->vnmi_blocked_time > 1000000000LL && in vmx_handle_exit()
8153 vmx->soft_vnmi_blocked = 0; in vmx_handle_exit()
8214 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_apic_access_page_addr() local
8230 !nested_cpu_has2(vmx->nested.current_vmcs12, in vmx_set_apic_access_page_addr()
8309 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) in vmx_complete_atomic_exit() argument
8313 if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY in vmx_complete_atomic_exit()
8314 || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)) in vmx_complete_atomic_exit()
8317 vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); in vmx_complete_atomic_exit()
8318 exit_intr_info = vmx->exit_intr_info; in vmx_complete_atomic_exit()
8327 kvm_before_handle_nmi(&vmx->vcpu); in vmx_complete_atomic_exit()
8329 kvm_after_handle_nmi(&vmx->vcpu); in vmx_complete_atomic_exit()
8347 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_external_intr() local
8353 desc = (gate_desc *)vmx->host_idt_base + vector; in vmx_handle_external_intr()
8396 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) in vmx_recover_nmi_blocking() argument
8403 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; in vmx_recover_nmi_blocking()
8406 if (vmx->nmi_known_unmasked) in vmx_recover_nmi_blocking()
8430 vmx->nmi_known_unmasked = in vmx_recover_nmi_blocking()
8433 } else if (unlikely(vmx->soft_vnmi_blocked)) in vmx_recover_nmi_blocking()
8434 vmx->vnmi_blocked_time += in vmx_recover_nmi_blocking()
8435 ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); in vmx_recover_nmi_blocking()
8492 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) in vmx_complete_interrupts() argument
8494 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, in vmx_complete_interrupts()
8509 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) in atomic_switch_perf_msrs() argument
8521 clear_atomic_switch_msr(vmx, msrs[i].msr); in atomic_switch_perf_msrs()
8523 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, in atomic_switch_perf_msrs()
8529 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run() local
8533 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) in vmx_vcpu_run()
8534 vmx->entry_time = ktime_get(); in vmx_vcpu_run()
8538 if (vmx->emulation_required) in vmx_vcpu_run()
8541 if (vmx->ple_window_dirty) { in vmx_vcpu_run()
8542 vmx->ple_window_dirty = false; in vmx_vcpu_run()
8543 vmcs_write32(PLE_WINDOW, vmx->ple_window); in vmx_vcpu_run()
8546 if (vmx->nested.sync_shadow_vmcs) { in vmx_vcpu_run()
8547 copy_vmcs12_to_shadow(vmx); in vmx_vcpu_run()
8548 vmx->nested.sync_shadow_vmcs = false; in vmx_vcpu_run()
8557 if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) { in vmx_vcpu_run()
8559 vmx->host_state.vmcs_host_cr4 = cr4; in vmx_vcpu_run()
8570 atomic_switch_perf_msrs(vmx); in vmx_vcpu_run()
8573 vmx->__launched = vmx->loaded_vmcs->launched; in vmx_vcpu_run()
8647 : : "c"(vmx), "d"((unsigned long)HOST_RSP), in vmx_vcpu_run()
8703 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); in vmx_vcpu_run()
8705 vmx->loaded_vmcs->launched = 1; in vmx_vcpu_run()
8707 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); in vmx_vcpu_run()
8714 if (vmx->nested.nested_run_pending) in vmx_vcpu_run()
8717 vmx->nested.nested_run_pending = 0; in vmx_vcpu_run()
8719 vmx_complete_atomic_exit(vmx); in vmx_vcpu_run()
8720 vmx_recover_nmi_blocking(vmx); in vmx_vcpu_run()
8721 vmx_complete_interrupts(vmx); in vmx_vcpu_run()
8726 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_load_vmcs01() local
8729 if (vmx->loaded_vmcs == &vmx->vmcs01) in vmx_load_vmcs01()
8733 vmx->loaded_vmcs = &vmx->vmcs01; in vmx_load_vmcs01()
8742 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu() local
8745 vmx_destroy_pml_buffer(vmx); in vmx_free_vcpu()
8746 free_vpid(vmx->vpid); in vmx_free_vcpu()
8749 free_nested(vmx); in vmx_free_vcpu()
8750 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_free_vcpu()
8751 kfree(vmx->guest_msrs); in vmx_free_vcpu()
8753 kmem_cache_free(kvm_vcpu_cache, vmx); in vmx_free_vcpu()
8759 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in vmx_create_vcpu() local
8762 if (!vmx) in vmx_create_vcpu()
8765 vmx->vpid = allocate_vpid(); in vmx_create_vcpu()
8767 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); in vmx_create_vcpu()
8771 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); in vmx_create_vcpu()
8772 BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) in vmx_create_vcpu()
8776 if (!vmx->guest_msrs) { in vmx_create_vcpu()
8780 vmx->loaded_vmcs = &vmx->vmcs01; in vmx_create_vcpu()
8781 vmx->loaded_vmcs->vmcs = alloc_vmcs(); in vmx_create_vcpu()
8782 if (!vmx->loaded_vmcs->vmcs) in vmx_create_vcpu()
8786 loaded_vmcs_init(vmx->loaded_vmcs); in vmx_create_vcpu()
8791 vmx_vcpu_load(&vmx->vcpu, cpu); in vmx_create_vcpu()
8792 vmx->vcpu.cpu = cpu; in vmx_create_vcpu()
8793 err = vmx_vcpu_setup(vmx); in vmx_create_vcpu()
8794 vmx_vcpu_put(&vmx->vcpu); in vmx_create_vcpu()
8798 if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { in vmx_create_vcpu()
8814 nested_vmx_setup_ctls_msrs(vmx); in vmx_create_vcpu()
8815 vmx->nested.vpid02 = allocate_vpid(); in vmx_create_vcpu()
8818 vmx->nested.posted_intr_nv = -1; in vmx_create_vcpu()
8819 vmx->nested.current_vmptr = -1ull; in vmx_create_vcpu()
8820 vmx->nested.current_vmcs12 = NULL; in vmx_create_vcpu()
8829 err = vmx_create_pml_buffer(vmx); in vmx_create_vcpu()
8834 return &vmx->vcpu; in vmx_create_vcpu()
8837 free_vpid(vmx->nested.vpid02); in vmx_create_vcpu()
8838 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_create_vcpu()
8840 kfree(vmx->guest_msrs); in vmx_create_vcpu()
8842 kvm_vcpu_uninit(&vmx->vcpu); in vmx_create_vcpu()
8844 free_vpid(vmx->vpid); in vmx_create_vcpu()
8845 kmem_cache_free(kvm_vcpu_cache, vmx); in vmx_create_vcpu()
8941 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update() local
8942 u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx); in vmx_cpuid_update()
8951 vmx->nested.nested_vmx_secondary_ctls_high |= in vmx_cpuid_update()
8954 vmx->nested.nested_vmx_secondary_ctls_high &= in vmx_cpuid_update()
8975 vmx->nested.nested_vmx_secondary_ctls_high |= in vmx_cpuid_update()
8978 vmx->nested.nested_vmx_secondary_ctls_high &= in vmx_cpuid_update()
9059 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages() local
9073 if (vmx->nested.apic_access_page) /* shouldn't happen */ in nested_get_vmcs12_pages()
9074 nested_release_page(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
9075 vmx->nested.apic_access_page = in nested_get_vmcs12_pages()
9084 if (vmx->nested.virtual_apic_page) /* shouldn't happen */ in nested_get_vmcs12_pages()
9085 nested_release_page(vmx->nested.virtual_apic_page); in nested_get_vmcs12_pages()
9086 vmx->nested.virtual_apic_page = in nested_get_vmcs12_pages()
9099 if (!vmx->nested.virtual_apic_page) in nested_get_vmcs12_pages()
9108 if (vmx->nested.pi_desc_page) { /* shouldn't happen */ in nested_get_vmcs12_pages()
9109 kunmap(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
9110 nested_release_page(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
9112 vmx->nested.pi_desc_page = in nested_get_vmcs12_pages()
9114 if (!vmx->nested.pi_desc_page) in nested_get_vmcs12_pages()
9117 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
9118 (struct pi_desc *)kmap(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
9119 if (!vmx->nested.pi_desc) { in nested_get_vmcs12_pages()
9120 nested_release_page_clean(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
9123 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
9124 (struct pi_desc *)((void *)vmx->nested.pi_desc + in nested_get_vmcs12_pages()
9135 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer() local
9143 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
9150 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
9470 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02() local
9515 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); in prepare_vmcs02()
9545 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02()
9546 vmx->nested.pi_pending = false; in prepare_vmcs02()
9549 page_to_phys(vmx->nested.pi_desc_page) + in prepare_vmcs02()
9557 vmx->nested.preemption_timer_expired = false; in prepare_vmcs02()
9587 exec_control = vmx_secondary_exec_control(vmx); in prepare_vmcs02()
9606 if (!vmx->nested.apic_access_page) in prepare_vmcs02()
9611 page_to_phys(vmx->nested.apic_access_page)); in prepare_vmcs02()
9613 cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { in prepare_vmcs02()
9642 vmx_set_constant_host_state(vmx); in prepare_vmcs02()
9651 vmx->host_rsp = 0; in prepare_vmcs02()
9653 exec_control = vmx_exec_control(vmx); /* L0's desires */ in prepare_vmcs02()
9661 page_to_phys(vmx->nested.virtual_apic_page)); in prepare_vmcs02()
9698 vm_entry_controls_init(vmx, in prepare_vmcs02()
9707 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
9710 set_cr4_guest_host_mask(vmx); in prepare_vmcs02()
9717 vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); in prepare_vmcs02()
9719 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); in prepare_vmcs02()
9730 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { in prepare_vmcs02()
9731 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02()
9732 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in prepare_vmcs02()
9733 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in prepare_vmcs02()
9737 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in prepare_vmcs02()
9799 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run() local
9813 copy_shadow_to_vmcs12(vmx); in nested_vmx_run()
9859 vmx->nested.nested_vmx_true_procbased_ctls_low, in nested_vmx_run()
9860 vmx->nested.nested_vmx_procbased_ctls_high) || in nested_vmx_run()
9862 vmx->nested.nested_vmx_secondary_ctls_low, in nested_vmx_run()
9863 vmx->nested.nested_vmx_secondary_ctls_high) || in nested_vmx_run()
9865 vmx->nested.nested_vmx_pinbased_ctls_low, in nested_vmx_run()
9866 vmx->nested.nested_vmx_pinbased_ctls_high) || in nested_vmx_run()
9868 vmx->nested.nested_vmx_true_exit_ctls_low, in nested_vmx_run()
9869 vmx->nested.nested_vmx_exit_ctls_high) || in nested_vmx_run()
9871 vmx->nested.nested_vmx_true_entry_ctls_low, in nested_vmx_run()
9872 vmx->nested.nested_vmx_entry_ctls_high)) in nested_vmx_run()
9941 vmcs02 = nested_get_current_vmcs02(vmx); in nested_vmx_run()
9947 vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); in nested_vmx_run()
9950 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in nested_vmx_run()
9953 vmx->loaded_vmcs = vmcs02; in nested_vmx_run()
9959 vmx_segment_cache_clear(vmx); in nested_vmx_run()
9979 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
10071 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events() local
10074 vmx->nested.preemption_timer_expired) { in vmx_check_nested_events()
10075 if (vmx->nested.nested_run_pending) in vmx_check_nested_events()
10082 if (vmx->nested.nested_run_pending || in vmx_check_nested_events()
10099 if (vmx->nested.nested_run_pending) in vmx_check_nested_events()
10427 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit() local
10431 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_vmexit()
10458 vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); in nested_vmx_vmexit()
10459 vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); in nested_vmx_vmexit()
10460 vmx_segment_cache_clear(vmx); in nested_vmx_vmexit()
10464 nested_free_vmcs02(vmx, vmx->nested.current_vmptr); in nested_vmx_vmexit()
10469 vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); in nested_vmx_vmexit()
10472 vmx->host_rsp = 0; in nested_vmx_vmexit()
10475 if (vmx->nested.apic_access_page) { in nested_vmx_vmexit()
10476 nested_release_page(vmx->nested.apic_access_page); in nested_vmx_vmexit()
10477 vmx->nested.apic_access_page = NULL; in nested_vmx_vmexit()
10479 if (vmx->nested.virtual_apic_page) { in nested_vmx_vmexit()
10480 nested_release_page(vmx->nested.virtual_apic_page); in nested_vmx_vmexit()
10481 vmx->nested.virtual_apic_page = NULL; in nested_vmx_vmexit()
10483 if (vmx->nested.pi_desc_page) { in nested_vmx_vmexit()
10484 kunmap(vmx->nested.pi_desc_page); in nested_vmx_vmexit()
10485 nested_release_page(vmx->nested.pi_desc_page); in nested_vmx_vmexit()
10486 vmx->nested.pi_desc_page = NULL; in nested_vmx_vmexit()
10487 vmx->nested.pi_desc = NULL; in nested_vmx_vmexit()
10501 if (unlikely(vmx->fail)) { in nested_vmx_vmexit()
10502 vmx->fail = 0; in nested_vmx_vmexit()
10507 vmx->nested.sync_shadow_vmcs = true; in nested_vmx_vmexit()