nested 147 arch/powerpc/include/asm/kvm_book3s_64.h if (vcpu->arch.nested)
nested 148 arch/powerpc/include/asm/kvm_book3s_64.h radix = vcpu->arch.nested->radix;
nested 812 arch/powerpc/include/asm/kvm_host.h struct kvm_nested_guest *nested;
nested 561 arch/powerpc/include/asm/kvm_ppc.h struct kvm_nested_guest *nested);
nested 515 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested);
nested 444 arch/powerpc/kernel/traps.c bool nested = in_nmi();
nested 451 arch/powerpc/kernel/traps.c if (!nested)
nested 524 arch/powerpc/kernel/traps.c if (!nested)
nested 826 arch/powerpc/kernel/traps.c bool nested = in_nmi();
nested 827 arch/powerpc/kernel/traps.c if (!nested)
nested 854 arch/powerpc/kernel/traps.c if (!nested)
nested 866 arch/powerpc/kernel/traps.c if (!nested)
nested 95 arch/powerpc/kvm/book3s_64_mmu_radix.c if (vcpu->arch.nested)
nested 96 arch/powerpc/kvm/book3s_64_mmu_radix.c lpid = vcpu->arch.nested->shadow_lpid;
nested 1198 arch/powerpc/kvm/book3s_64_mmu_radix.c struct kvm_nested_guest *nested;
nested 1233 arch/powerpc/kvm/book3s_64_mmu_radix.c nested = NULL;
nested 1239 arch/powerpc/kvm/book3s_64_mmu_radix.c if (nested) {
nested 1240 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmhv_put_nested(nested);
nested 1241 arch/powerpc/kvm/book3s_64_mmu_radix.c nested = NULL;
nested 1252 arch/powerpc/kvm/book3s_64_mmu_radix.c nested = kvmhv_get_nested(kvm, p->lpid, false);
nested 1253 arch/powerpc/kvm/book3s_64_mmu_radix.c if (!nested) {
nested 1257 arch/powerpc/kvm/book3s_64_mmu_radix.c pgt = nested->shadow_pgtable;
nested 1331 arch/powerpc/kvm/book3s_64_mmu_radix.c if (nested)
nested 1332 arch/powerpc/kvm/book3s_64_mmu_radix.c kvmhv_put_nested(nested);
nested 124 arch/powerpc/kvm/book3s_hv.c static bool nested = true;
nested 125 arch/powerpc/kvm/book3s_hv.c module_param(nested, bool, S_IRUGO | S_IWUSR);
nested 126 arch/powerpc/kvm/book3s_hv.c MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
nested 2535 arch/powerpc/kvm/book3s_hv.c struct kvm_nested_guest *nested = vcpu->arch.nested;
nested 2540 arch/powerpc/kvm/book3s_hv.c if (nested) {
nested 2541 arch/powerpc/kvm/book3s_hv.c cpumask_set_cpu(cpu, &nested->need_tlb_flush);
nested 2542 arch/powerpc/kvm/book3s_hv.c cpu_in_guest = &nested->cpu_in_guest;
nested 2560 arch/powerpc/kvm/book3s_hv.c struct kvm_nested_guest *nested = vcpu->arch.nested;
nested 2567 arch/powerpc/kvm/book3s_hv.c if (nested)
nested 2568 arch/powerpc/kvm/book3s_hv.c prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
nested 2589 arch/powerpc/kvm/book3s_hv.c if (nested)
nested 2590 arch/powerpc/kvm/book3s_hv.c nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
nested 3605 arch/powerpc/kvm/book3s_hv.c if (vcpu->arch.nested) {
nested 3606 arch/powerpc/kvm/book3s_hv.c hvregs.lpid = vcpu->arch.nested->shadow_lpid;
nested 3623 arch/powerpc/kvm/book3s_hv.c if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
nested 4044 arch/powerpc/kvm/book3s_hv.c struct kvm_nested_guest *nested = vcpu->arch.nested;
nested 4088 arch/powerpc/kvm/book3s_hv.c if (!nested) {
nested 4118 arch/powerpc/kvm/book3s_hv.c lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
nested 4121 arch/powerpc/kvm/book3s_hv.c kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
nested 4172 arch/powerpc/kvm/book3s_hv.c if (!nested)
nested 5351 arch/powerpc/kvm/book3s_hv.c if (!nested)
nested 5375 arch/powerpc/kvm/book3s_hv.c if (rc && vcpu->arch.nested)
nested 5394 arch/powerpc/kvm/book3s_hv.c if (rc && vcpu->arch.nested)
nested 839 arch/powerpc/kvm/book3s_hv_builtin.c struct kvm_nested_guest *nested)
nested 852 arch/powerpc/kvm/book3s_hv_builtin.c if (nested)
nested 853 arch/powerpc/kvm/book3s_hv_builtin.c need_tlb_flush = &nested->need_tlb_flush;
nested 275 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.nested = l2;
nested 307 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.nested = NULL;
nested 1433 arch/powerpc/kvm/book3s_hv_nested.c struct kvm_nested_guest *gp = vcpu->arch.nested;
nested 172 arch/s390/kvm/kvm-s390.c static int nested;
nested 173 arch/s390/kvm/kvm-s390.c module_param(nested, int, S_IRUGO);
nested 174 arch/s390/kvm/kvm-s390.c MODULE_PARM_DESC(nested, "Nested virtualization support");
nested 415 arch/s390/kvm/kvm-s390.c !test_facility(3) || !nested)
nested 4581 arch/s390/kvm/kvm-s390.c if (nested && hpage) {
nested 59 arch/um/include/shared/kern_util.h extern unsigned long from_irq_stack(int nested);
nested 542 arch/um/kernel/irq.c int nested;
nested 564 arch/um/kernel/irq.c nested = (ti->real_thread != NULL);
nested 565 arch/um/kernel/irq.c if (!nested) {
nested 578 arch/um/kernel/irq.c *mask_out |= mask | nested;
nested 582 arch/um/kernel/irq.c unsigned long from_irq_stack(int nested)
nested 158 arch/um/os-Linux/signal.c int nested, bail;
nested 174 arch/um/os-Linux/signal.c nested = pending & 1;
nested 189 arch/um/os-Linux/signal.c if (!nested)
nested 190 arch/um/os-Linux/signal.c pending = from_irq_stack(nested);
nested 221 arch/x86/kvm/svm.c struct nested_state nested;
nested 356 arch/x86/kvm/svm.c static int nested = true;
nested 357 arch/x86/kvm/svm.c module_param(nested, int, S_IRUGO);
nested 512 arch/x86/kvm/svm.c h = &svm->nested.hsave->control;
nested 513 arch/x86/kvm/svm.c g = &svm->nested;
nested 524 arch/x86/kvm/svm.c return svm->nested.hsave;
nested 1048 arch/x86/kvm/svm.c msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
nested 1379 arch/x86/kvm/svm.c if (nested) {
nested 1495 arch/x86/kvm/svm.c return svm->nested.hsave->control.tsc_offset;
nested 1508 arch/x86/kvm/svm.c svm->nested.hsave->control.tsc_offset;
nested 1509 arch/x86/kvm/svm.c svm->nested.hsave->control.tsc_offset = offset;
nested 1654 arch/x86/kvm/svm.c svm->nested.vmcb = 0;
nested 2245 arch/x86/kvm/svm.c svm->nested.hsave = page_address(hsave_page);
nested 2250 arch/x86/kvm/svm.c svm->nested.msrpm = page_address(nested_msrpm_pages);
nested 2251 arch/x86/kvm/svm.c svm_vcpu_init_msrpm(svm->nested.msrpm);
nested 2304 arch/x86/kvm/svm.c __free_page(virt_to_page(svm->nested.hsave));
nested 2305 arch/x86/kvm/svm.c __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
nested 2973 arch/x86/kvm/svm.c return svm->nested.nested_cr3;
nested 2979 arch/x86/kvm/svm.c u64 cr3 = svm->nested.nested_cr3;
nested 3092 arch/x86/kvm/svm.c svm->nested.exit_required = true;
nested 3113 arch/x86/kvm/svm.c if (svm->nested.exit_required)
nested 3120 arch/x86/kvm/svm.c if (svm->nested.intercept & 1ULL) {
nested 3127 arch/x86/kvm/svm.c svm->nested.exit_required = true;
nested 3141 arch/x86/kvm/svm.c if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
nested 3145 arch/x86/kvm/svm.c svm->nested.exit_required = true;
nested 3157 arch/x86/kvm/svm.c if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
nested 3163 arch/x86/kvm/svm.c gpa = svm->nested.vmcb_iopm + (port / 8);
nested 3180 arch/x86/kvm/svm.c if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
nested 3194 arch/x86/kvm/svm.c if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
nested 3268 arch/x86/kvm/svm.c if (svm->nested.intercept_cr & bit)
nested 3274 arch/x86/kvm/svm.c if (svm->nested.intercept_dr & bit)
nested 3280 arch/x86/kvm/svm.c if (svm->nested.intercept_exceptions & excp_bits) {
nested 3298 arch/x86/kvm/svm.c if (svm->nested.intercept & exit_bits)
nested 3354 arch/x86/kvm/svm.c struct vmcb *hsave = svm->nested.hsave;
nested 3365 arch/x86/kvm/svm.c rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
nested 3376 arch/x86/kvm/svm.c svm->nested.vmcb = 0;
nested 3448 arch/x86/kvm/svm.c svm->nested.nested_cr3 = 0;
nested 3502 arch/x86/kvm/svm.c if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
nested 3513 arch/x86/kvm/svm.c offset = svm->nested.vmcb_msrpm + (p * 4);
nested 3518 arch/x86/kvm/svm.c svm->nested.msrpm[p] = svm->msrpm[p] | value;
nested 3521 arch/x86/kvm/svm.c svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
nested 3550 arch/x86/kvm/svm.c svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
nested 3587 arch/x86/kvm/svm.c svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
nested 3588 arch/x86/kvm/svm.c svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
nested 3591 arch/x86/kvm/svm.c svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
nested 3592 arch/x86/kvm/svm.c svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
nested 3593 arch/x86/kvm/svm.c svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
nested 3594 arch/x86/kvm/svm.c svm->nested.intercept = nested_vmcb->control.intercept;
nested 3637 arch/x86/kvm/svm.c svm->nested.vmcb = vmcb_gpa;
nested 3648 arch/x86/kvm/svm.c struct vmcb *hsave = svm->nested.hsave;
nested 4010 arch/x86/kvm/svm.c intercept = svm->nested.intercept;
nested 4226 arch/x86/kvm/svm.c msr_info->data = svm->nested.hsave_msr;
nested 4229 arch/x86/kvm/svm.c msr_info->data = svm->nested.vm_cr_msr;
nested 4287 arch/x86/kvm/svm.c if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
nested 4290 arch/x86/kvm/svm.c svm->nested.vm_cr_msr &= ~chg_mask;
nested 4291 arch/x86/kvm/svm.c svm->nested.vm_cr_msr |= (data & chg_mask);
nested 4293 arch/x86/kvm/svm.c svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
nested 4427 arch/x86/kvm/svm.c svm->nested.hsave_msr = data;
nested 4989 arch/x86/kvm/svm.c if (unlikely(svm->nested.exit_required)) {
nested 4991 arch/x86/kvm/svm.c svm->nested.exit_required = false;
nested 5505 arch/x86/kvm/svm.c if (svm->nested.exit_required)
nested 5662 arch/x86/kvm/svm.c if (unlikely(svm->nested.exit_required))
nested 5966 arch/x86/kvm/svm.c if (nested)
nested 6142 arch/x86/kvm/svm.c intercept = svm->nested.intercept;
nested 6264 arch/x86/kvm/svm.c svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
nested 6267 arch/x86/kvm/svm.c svm->nested.exit_required = true;
nested 6283 arch/x86/kvm/svm.c put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
nested 60 arch/x86/kvm/vmx/capabilities.h struct nested_vmx_msrs nested;
nested 343 arch/x86/kvm/vmx/evmcs.c if (vmx->nested.enlightened_vmcs_enabled)
nested 353 arch/x86/kvm/vmx/evmcs.c bool evmcs_already_enabled = vmx->nested.enlightened_vmcs_enabled;
nested 355 arch/x86/kvm/vmx/evmcs.c vmx->nested.enlightened_vmcs_enabled = true;
nested 364 arch/x86/kvm/vmx/evmcs.c vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
nested 365 arch/x86/kvm/vmx/evmcs.c vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
nested 366 arch/x86/kvm/vmx/evmcs.c vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
nested 367 arch/x86/kvm/vmx/evmcs.c vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
nested 368 arch/x86/kvm/vmx/evmcs.c vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC;
nested 179 arch/x86/kvm/vmx/nested.c if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
nested 215 arch/x86/kvm/vmx/nested.c vmx->nested.need_vmcs12_to_shadow_sync = false;
nested 222 arch/x86/kvm/vmx/nested.c if (!vmx->nested.hv_evmcs)
nested 225 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
nested 226 arch/x86/kvm/vmx/nested.c vmx->nested.hv_evmcs_vmptr = 0;
nested 227 arch/x86/kvm/vmx/nested.c vmx->nested.hv_evmcs = NULL;
nested 238 arch/x86/kvm/vmx/nested.c if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
nested 243 arch/x86/kvm/vmx/nested.c vmx->nested.vmxon = false;
nested 244 arch/x86/kvm/vmx/nested.c vmx->nested.smm.vmxon = false;
nested 245 arch/x86/kvm/vmx/nested.c free_vpid(vmx->nested.vpid02);
nested 246 arch/x86/kvm/vmx/nested.c vmx->nested.posted_intr_nv = -1;
nested 247 arch/x86/kvm/vmx/nested.c vmx->nested.current_vmptr = -1ull;
nested 254 arch/x86/kvm/vmx/nested.c kfree(vmx->nested.cached_vmcs12);
nested 255 arch/x86/kvm/vmx/nested.c vmx->nested.cached_vmcs12 = NULL;
nested 256 arch/x86/kvm/vmx/nested.c kfree(vmx->nested.cached_shadow_vmcs12);
nested 257 arch/x86/kvm/vmx/nested.c vmx->nested.cached_shadow_vmcs12 = NULL;
nested 259 arch/x86/kvm/vmx/nested.c if (vmx->nested.apic_access_page) {
nested 260 arch/x86/kvm/vmx/nested.c kvm_release_page_dirty(vmx->nested.apic_access_page);
nested 261 arch/x86/kvm/vmx/nested.c vmx->nested.apic_access_page = NULL;
nested 263 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
nested 264 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
nested 265 arch/x86/kvm/vmx/nested.c vmx->nested.pi_desc = NULL;
nested 271 arch/x86/kvm/vmx/nested.c free_loaded_vmcs(&vmx->nested.vmcs02);
nested 333 arch/x86/kvm/vmx/nested.c if (vmx->nested.pml_full) {
nested 335 arch/x86/kvm/vmx/nested.c vmx->nested.pml_full = false;
nested 352 arch/x86/kvm/vmx/nested.c to_vmx(vcpu)->nested.msrs.ept_caps &
nested 429 arch/x86/kvm/vmx/nested.c !to_vmx(vcpu)->nested.nested_run_pending) {
nested 566 arch/x86/kvm/vmx/nested.c unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
nested 567 arch/x86/kvm/vmx/nested.c struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
nested 653 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
nested 882 arch/x86/kvm/vmx/nested.c u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
nested 883 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.misc_high);
nested 1038 arch/x86/kvm/vmx/nested.c (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
nested 1045 arch/x86/kvm/vmx/nested.c return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
nested 1063 arch/x86/kvm/vmx/nested.c u64 vmx_basic = vmx->nested.msrs.basic;
nested 1082 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.basic = data;
nested 1094 arch/x86/kvm/vmx/nested.c lowp = &vmx->nested.msrs.pinbased_ctls_low;
nested 1095 arch/x86/kvm/vmx/nested.c highp = &vmx->nested.msrs.pinbased_ctls_high;
nested 1098 arch/x86/kvm/vmx/nested.c lowp = &vmx->nested.msrs.procbased_ctls_low;
nested 1099 arch/x86/kvm/vmx/nested.c highp = &vmx->nested.msrs.procbased_ctls_high;
nested 1102 arch/x86/kvm/vmx/nested.c lowp = &vmx->nested.msrs.exit_ctls_low;
nested 1103 arch/x86/kvm/vmx/nested.c highp = &vmx->nested.msrs.exit_ctls_high;
nested 1106 arch/x86/kvm/vmx/nested.c lowp = &vmx->nested.msrs.entry_ctls_low;
nested 1107 arch/x86/kvm/vmx/nested.c highp = &vmx->nested.msrs.entry_ctls_high;
nested 1110 arch/x86/kvm/vmx/nested.c lowp = &vmx->nested.msrs.secondary_ctls_low;
nested 1111 arch/x86/kvm/vmx/nested.c highp = &vmx->nested.msrs.secondary_ctls_high;
nested 1142 arch/x86/kvm/vmx/nested.c vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
nested 1143 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.misc_high);
nested 1148 arch/x86/kvm/vmx/nested.c if ((vmx->nested.msrs.pinbased_ctls_high &
nested 1163 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.misc_low = data;
nested 1164 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.misc_high = data >> 32;
nested 1173 arch/x86/kvm/vmx/nested.c vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
nested 1174 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.vpid_caps);
nested 1180 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.ept_caps = data;
nested 1181 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.vpid_caps = data >> 32;
nested 1191 arch/x86/kvm/vmx/nested.c msr = &vmx->nested.msrs.cr0_fixed0;
nested 1194 arch/x86/kvm/vmx/nested.c msr = &vmx->nested.msrs.cr4_fixed0;
nested 1224 arch/x86/kvm/vmx/nested.c if (vmx->nested.vmxon)
nested 1265 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.vmcs_enum = data;
nested 1268 arch/x86/kvm/vmx/nested.c if (data & ~vmx->nested.msrs.vmfunc_controls)
nested 1270 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.vmfunc_controls = data;
nested 1429 arch/x86/kvm/vmx/nested.c struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
nested 1430 arch/x86/kvm/vmx/nested.c struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
nested 1649 arch/x86/kvm/vmx/nested.c struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
nested 1650 arch/x86/kvm/vmx/nested.c struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
nested 1825 arch/x86/kvm/vmx/nested.c if (likely(!vmx->nested.enlightened_vmcs_enabled))
nested 1831 arch/x86/kvm/vmx/nested.c if (unlikely(!vmx->nested.hv_evmcs ||
nested 1832 arch/x86/kvm/vmx/nested.c evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
nested 1833 arch/x86/kvm/vmx/nested.c if (!vmx->nested.hv_evmcs)
nested 1834 arch/x86/kvm/vmx/nested.c vmx->nested.current_vmptr = -1ull;
nested 1839 arch/x86/kvm/vmx/nested.c &vmx->nested.hv_evmcs_map))
nested 1842 arch/x86/kvm/vmx/nested.c vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
nested 1866 arch/x86/kvm/vmx/nested.c if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
nested 1867 arch/x86/kvm/vmx/nested.c (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
nested 1872 arch/x86/kvm/vmx/nested.c vmx->nested.dirty_vmcs12 = true;
nested 1873 arch/x86/kvm/vmx/nested.c vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
nested 1895 arch/x86/kvm/vmx/nested.c vmx->nested.hv_evmcs->hv_clean_fields &=
nested 1910 arch/x86/kvm/vmx/nested.c if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
nested 1913 arch/x86/kvm/vmx/nested.c if (vmx->nested.hv_evmcs) {
nested 1916 arch/x86/kvm/vmx/nested.c vmx->nested.hv_evmcs->hv_clean_fields |=
nested 1922 arch/x86/kvm/vmx/nested.c vmx->nested.need_vmcs12_to_shadow_sync = false;
nested 1928 arch/x86/kvm/vmx/nested.c container_of(timer, struct vcpu_vmx, nested.preemption_timer);
nested 1930 arch/x86/kvm/vmx/nested.c vmx->nested.preemption_timer_expired = true;
nested 1947 arch/x86/kvm/vmx/nested.c vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
nested 1957 arch/x86/kvm/vmx/nested.c hrtimer_start(&vmx->nested.preemption_timer,
nested 1963 arch/x86/kvm/vmx/nested.c if (vmx->nested.nested_run_pending &&
nested 1980 arch/x86/kvm/vmx/nested.c if (vmx->nested.vmcs02_initialized)
nested 1982 arch/x86/kvm/vmx/nested.c vmx->nested.vmcs02_initialized = true;
nested 2000 arch/x86/kvm/vmx/nested.c vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
nested 2037 arch/x86/kvm/vmx/nested.c if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
nested 2038 arch/x86/kvm/vmx/nested.c vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
nested 2049 arch/x86/kvm/vmx/nested.c if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
nested 2061 arch/x86/kvm/vmx/nested.c vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
nested 2062 arch/x86/kvm/vmx/nested.c vmx->nested.pi_pending = false;
nested 2176 arch/x86/kvm/vmx/nested.c if (vmx->nested.nested_run_pending) {
nested 2194 arch/x86/kvm/vmx/nested.c struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
nested 2255 arch/x86/kvm/vmx/nested.c if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
nested 2310 arch/x86/kvm/vmx/nested.c struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
nested 2313 arch/x86/kvm/vmx/nested.c if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
nested 2315 arch/x86/kvm/vmx/nested.c vmx->nested.dirty_vmcs12 = false;
nested 2322 arch/x86/kvm/vmx/nested.c if (vmx->nested.nested_run_pending &&
nested 2328 arch/x86/kvm/vmx/nested.c vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
nested 2330 arch/x86/kvm/vmx/nested.c if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
nested 2332 arch/x86/kvm/vmx/nested.c vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
nested 2343 arch/x86/kvm/vmx/nested.c if (vmx->nested.nested_run_pending &&
nested 2366 arch/x86/kvm/vmx/nested.c if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
nested 2367 arch/x86/kvm/vmx/nested.c vmx->nested.last_vpid = vmcs12->virtual_processor_id;
nested 2470 arch/x86/kvm/vmx/nested.c if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
nested 2474 arch/x86/kvm/vmx/nested.c if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
nested 2491 arch/x86/kvm/vmx/nested.c if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
nested 2507 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.pinbased_ctls_low,
nested 2508 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.pinbased_ctls_high)) ||
nested 2510 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.procbased_ctls_low,
nested 2511 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.procbased_ctls_high)))
nested 2516 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.secondary_ctls_low,
nested 2517 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.secondary_ctls_high)))
nested 2544 arch/x86/kvm/vmx/nested.c ~vmx->nested.msrs.vmfunc_controls))
nested 2566 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.exit_ctls_low,
nested 2567 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.exit_ctls_high)) ||
nested 2583 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.entry_ctls_low,
nested 2584 arch/x86/kvm/vmx/nested.c vmx->nested.msrs.entry_ctls_high)))
nested 2802 arch/x86/kvm/vmx/nested.c if (to_vmx(vcpu)->nested.nested_run_pending &&
nested 2946 arch/x86/kvm/vmx/nested.c if (vmx->nested.apic_access_page) { /* shouldn't happen */
nested 2947 arch/x86/kvm/vmx/nested.c kvm_release_page_dirty(vmx->nested.apic_access_page);
nested 2948 arch/x86/kvm/vmx/nested.c vmx->nested.apic_access_page = NULL;
nested 2952 arch/x86/kvm/vmx/nested.c vmx->nested.apic_access_page = page;
nested 2953 arch/x86/kvm/vmx/nested.c hpa = page_to_phys(vmx->nested.apic_access_page);
nested 2967 arch/x86/kvm/vmx/nested.c map = &vmx->nested.virtual_apic_map;
nested 2993 arch/x86/kvm/vmx/nested.c map = &vmx->nested.pi_desc_map;
nested 2996 arch/x86/kvm/vmx/nested.c vmx->nested.pi_desc =
nested 3019 arch/x86/kvm/vmx/nested.c if (!to_vmx(vcpu)->nested.vmxon) {
nested 3068 arch/x86/kvm/vmx/nested.c vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
nested 3071 arch/x86/kvm/vmx/nested.c vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
nested 3092 arch/x86/kvm/vmx/nested.c vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
nested 3156 arch/x86/kvm/vmx/nested.c vmx->nested.preemption_timer_expired = false;
nested 3187 arch/x86/kvm/vmx/nested.c if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
nested 3188 arch/x86/kvm/vmx/nested.c vmx->nested.need_vmcs12_to_shadow_sync = true;
nested 3209 arch/x86/kvm/vmx/nested.c if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
nested 3223 arch/x86/kvm/vmx/nested.c if (vmx->nested.hv_evmcs) {
nested 3260 arch/x86/kvm/vmx/nested.c vmx->nested.nested_run_pending = 1;
nested 3290 arch/x86/kvm/vmx/nested.c vmx->nested.nested_run_pending = 0;
nested 3296 arch/x86/kvm/vmx/nested.c vmx->nested.nested_run_pending = 0;
nested 3413 arch/x86/kvm/vmx/nested.c if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
nested 3416 arch/x86/kvm/vmx/nested.c vmx->nested.pi_pending = false;
nested 3417 arch/x86/kvm/vmx/nested.c if (!pi_test_and_clear_on(vmx->nested.pi_desc))
nested 3420 arch/x86/kvm/vmx/nested.c max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
nested 3422 arch/x86/kvm/vmx/nested.c vapic_page = vmx->nested.virtual_apic_map.hva;
nested 3426 arch/x86/kvm/vmx/nested.c __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
nested 3468 arch/x86/kvm/vmx/nested.c vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
nested 3488 arch/x86/kvm/vmx/nested.c vmx->nested.preemption_timer_expired) {
nested 3524 arch/x86/kvm/vmx/nested.c hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
nested 3626 arch/x86/kvm/vmx/nested.c vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
nested 3635 arch/x86/kvm/vmx/nested.c if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
nested 3642 arch/x86/kvm/vmx/nested.c vmx->loaded_vmcs = &vmx->nested.vmcs02;
nested 3662 arch/x86/kvm/vmx/nested.c if (vmx->nested.hv_evmcs)
nested 3665 arch/x86/kvm/vmx/nested.c vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
nested 4080 arch/x86/kvm/vmx/nested.c WARN_ON_ONCE(vmx->nested.nested_run_pending);
nested 4085 arch/x86/kvm/vmx/nested.c hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
nested 4129 arch/x86/kvm/vmx/nested.c if (vmx->nested.change_vmcs01_virtual_apic_mode) {
nested 4130 arch/x86/kvm/vmx/nested.c vmx->nested.change_vmcs01_virtual_apic_mode = false;
nested 4139 arch/x86/kvm/vmx/nested.c if (vmx->nested.apic_access_page) {
nested 4140 arch/x86/kvm/vmx/nested.c kvm_release_page_dirty(vmx->nested.apic_access_page);
nested 4141 arch/x86/kvm/vmx/nested.c vmx->nested.apic_access_page = NULL;
nested 4143 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
nested 4144 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
nested 4145 arch/x86/kvm/vmx/nested.c vmx->nested.pi_desc = NULL;
nested 4153 arch/x86/kvm/vmx/nested.c if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
nested 4154 arch/x86/kvm/vmx/nested.c vmx->nested.need_vmcs12_to_shadow_sync = true;
nested 4380 arch/x86/kvm/vmx/nested.c r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
nested 4384 arch/x86/kvm/vmx/nested.c vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
nested 4385 arch/x86/kvm/vmx/nested.c if (!vmx->nested.cached_vmcs12)
nested 4388 arch/x86/kvm/vmx/nested.c vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
nested 4389 arch/x86/kvm/vmx/nested.c if (!vmx->nested.cached_shadow_vmcs12)
nested 4395 arch/x86/kvm/vmx/nested.c hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
nested 4397 arch/x86/kvm/vmx/nested.c vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
nested 4399 arch/x86/kvm/vmx/nested.c vmx->nested.vpid02 = allocate_vpid();
nested 4401 arch/x86/kvm/vmx/nested.c vmx->nested.vmcs02_initialized = false;
nested 4402 arch/x86/kvm/vmx/nested.c vmx->nested.vmxon = true;
nested 4412 arch/x86/kvm/vmx/nested.c kfree(vmx->nested.cached_shadow_vmcs12);
nested 4415 arch/x86/kvm/vmx/nested.c kfree(vmx->nested.cached_vmcs12);
nested 4418 arch/x86/kvm/vmx/nested.c free_loaded_vmcs(&vmx->nested.vmcs02);
nested 4461 arch/x86/kvm/vmx/nested.c if (vmx->nested.vmxon)
nested 4489 arch/x86/kvm/vmx/nested.c vmx->nested.vmxon_ptr = vmptr;
nested 4501 arch/x86/kvm/vmx/nested.c if (vmx->nested.current_vmptr == -1ull)
nested 4512 arch/x86/kvm/vmx/nested.c vmx->nested.posted_intr_nv = -1;
nested 4516 arch/x86/kvm/vmx/nested.c vmx->nested.current_vmptr >> PAGE_SHIFT,
nested 4517 arch/x86/kvm/vmx/nested.c vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
nested 4521 arch/x86/kvm/vmx/nested.c vmx->nested.current_vmptr = -1ull;
nested 4556 arch/x86/kvm/vmx/nested.c if (vmptr == vmx->nested.vmxon_ptr)
nested 4570 arch/x86/kvm/vmx/nested.c if (likely(!vmx->nested.enlightened_vmcs_enabled ||
nested 4572 arch/x86/kvm/vmx/nested.c if (vmptr == vmx->nested.current_vmptr)
nested 4620 arch/x86/kvm/vmx/nested.c if (vmx->nested.current_vmptr == -1ull ||
nested 4714 arch/x86/kvm/vmx/nested.c if (vmx->nested.current_vmptr == -1ull ||
nested 4791 arch/x86/kvm/vmx/nested.c vmx->nested.dirty_vmcs12 = true;
nested 4799 arch/x86/kvm/vmx/nested.c vmx->nested.current_vmptr = vmptr;
nested 4804 arch/x86/kvm/vmx/nested.c vmx->nested.need_vmcs12_to_shadow_sync = true;
nested 4806 arch/x86/kvm/vmx/nested.c vmx->nested.dirty_vmcs12 = true;
nested 4825 arch/x86/kvm/vmx/nested.c if (vmptr == vmx->nested.vmxon_ptr)
nested 4830 arch/x86/kvm/vmx/nested.c if (vmx->nested.hv_evmcs)
nested 4833 arch/x86/kvm/vmx/nested.c if (vmx->nested.current_vmptr != vmptr) {
nested 4864 arch/x86/kvm/vmx/nested.c memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
nested 4878 arch/x86/kvm/vmx/nested.c gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
nested 4885 arch/x86/kvm/vmx/nested.c if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
nested 4912 arch/x86/kvm/vmx/nested.c if (!(vmx->nested.msrs.secondary_ctls_high &
nested 4914 arch/x86/kvm/vmx/nested.c !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
nested 4925 arch/x86/kvm/vmx/nested.c types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
nested 4971 arch/x86/kvm/vmx/nested.c if (!(vmx->nested.msrs.secondary_ctls_high &
nested 4973 arch/x86/kvm/vmx/nested.c !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
nested 4984 arch/x86/kvm/vmx/nested.c types = (vmx->nested.msrs.vpid_caps &
nested 5330 arch/x86/kvm/vmx/nested.c if (vmx->nested.nested_run_pending)
nested 5529 arch/x86/kvm/vmx/nested.c (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
nested 5530 arch/x86/kvm/vmx/nested.c kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
nested 5531 arch/x86/kvm/vmx/nested.c kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
nested 5536 arch/x86/kvm/vmx/nested.c if (vmx->nested.hv_evmcs)
nested 5545 arch/x86/kvm/vmx/nested.c if (vmx->nested.smm.vmxon)
nested 5548 arch/x86/kvm/vmx/nested.c if (vmx->nested.smm.guest_mode)
nested 5554 arch/x86/kvm/vmx/nested.c if (vmx->nested.nested_run_pending)
nested 5578 arch/x86/kvm/vmx/nested.c } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
nested 5579 arch/x86/kvm/vmx/nested.c if (vmx->nested.hv_evmcs)
nested 5612 arch/x86/kvm/vmx/nested.c to_vmx(vcpu)->nested.nested_run_pending = 0;
nested 5682 arch/x86/kvm/vmx/nested.c (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
nested 5690 arch/x86/kvm/vmx/nested.c vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
nested 5710 arch/x86/kvm/vmx/nested.c vmx->nested.need_vmcs12_to_shadow_sync = true;
nested 5716 arch/x86/kvm/vmx/nested.c vmx->nested.smm.vmxon = true;
nested 5717 arch/x86/kvm/vmx/nested.c vmx->nested.vmxon = false;
nested 5720 arch/x86/kvm/vmx/nested.c vmx->nested.smm.guest_mode = true;
nested 5733 arch/x86/kvm/vmx/nested.c vmx->nested.nested_run_pending =
nested 5763 arch/x86/kvm/vmx/nested.c vmx->nested.dirty_vmcs12 = true;
nested 5771 arch/x86/kvm/vmx/nested.c vmx->nested.nested_run_pending = 0;
nested 40 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.cached_vmcs12;
nested 45 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
nested 58 arch/x86/kvm/vmx/nested.h return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
nested 59 arch/x86/kvm/vmx/nested.h vmx->nested.hv_evmcs;
nested 119 arch/x86/kvm/vmx/nested.h return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
nested 129 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.msrs.misc_low &
nested 135 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
nested 140 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
nested 146 arch/x86/kvm/vmx/nested.h return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
nested 262 arch/x86/kvm/vmx/nested.h u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
nested 263 arch/x86/kvm/vmx/nested.h u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
nested 266 arch/x86/kvm/vmx/nested.h if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
nested 276 arch/x86/kvm/vmx/nested.h u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
nested 277 arch/x86/kvm/vmx/nested.h u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
nested 284 arch/x86/kvm/vmx/nested.h u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
nested 285 arch/x86/kvm/vmx/nested.h u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
nested 106 arch/x86/kvm/vmx/vmx.c static bool __read_mostly nested = 1;
nested 107 arch/x86/kvm/vmx/vmx.c module_param(nested, bool, S_IRUGO);
nested 1720 arch/x86/kvm/vmx/vmx.c return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
nested 1735 arch/x86/kvm/vmx/vmx.c if (!nested)
nested 1737 arch/x86/kvm/vmx/vmx.c return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
nested 1812 arch/x86/kvm/vmx/vmx.c return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
nested 2079 arch/x86/kvm/vmx/vmx.c vmx->nested.vmxon)
nested 3044 arch/x86/kvm/vmx/vmx.c if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
nested 3749 arch/x86/kvm/vmx/vmx.c WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
nested 3754 arch/x86/kvm/vmx/vmx.c vapic_page = vmx->nested.virtual_apic_map.hva;
nested 3761 arch/x86/kvm/vmx/vmx.c bool nested)
nested 3764 arch/x86/kvm/vmx/vmx.c int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
nested 3805 arch/x86/kvm/vmx/vmx.c vector == vmx->nested.posted_intr_nv) {
nested 3810 arch/x86/kvm/vmx/vmx.c vmx->nested.pi_pending = true;
nested 4036 arch/x86/kvm/vmx/vmx.c if (nested) {
nested 4038 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high |=
nested 4041 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high &=
nested 4051 arch/x86/kvm/vmx/vmx.c if (nested) {
nested 4053 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high |=
nested 4056 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high &=
nested 4072 arch/x86/kvm/vmx/vmx.c if (nested) {
nested 4074 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high |=
nested 4077 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high &=
nested 4087 arch/x86/kvm/vmx/vmx.c if (nested) {
nested 4089 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high |=
nested 4092 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high &=
nested 4102 arch/x86/kvm/vmx/vmx.c if (nested) {
nested 4104 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high |=
nested 4107 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high &=
nested 4119 arch/x86/kvm/vmx/vmx.c if (nested) {
nested 4121 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high |=
nested 4124 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.secondary_ctls_high &=
nested 4151 arch/x86/kvm/vmx/vmx.c if (nested)
nested 4464 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->nested.nested_run_pending)
nested 4478 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->nested.nested_run_pending)
nested 4770 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->nested.vmxon &&
nested 5230 arch/x86/kvm/vmx/vmx.c WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
nested 6046 arch/x86/kvm/vmx/vmx.c vmx->nested.change_vmcs01_virtual_apic_mode = true;
nested 6284 arch/x86/kvm/vmx/vmx.c return nested;
nested 6503 arch/x86/kvm/vmx/vmx.c if (vmx->nested.need_vmcs12_to_shadow_sync)
nested 6623 arch/x86/kvm/vmx/vmx.c vmx->nested.nested_run_pending = 0;
nested 6770 arch/x86/kvm/vmx/vmx.c if (nested)
nested 6771 arch/x86/kvm/vmx/vmx.c nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
nested 6774 arch/x86/kvm/vmx/vmx.c memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
nested 6776 arch/x86/kvm/vmx/vmx.c vmx->nested.posted_intr_nv = -1;
nested 6777 arch/x86/kvm/vmx/vmx.c vmx->nested.current_vmptr = -1ull;
nested 6853 arch/x86/kvm/vmx/vmx.c if (nested)
nested 6854 arch/x86/kvm/vmx/vmx.c nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
nested 6943 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
nested 6944 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
nested 6948 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
nested 6985 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
nested 6986 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
nested 6988 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
nested 6989 arch/x86/kvm/vmx/vmx.c vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
nested 7091 arch/x86/kvm/vmx/vmx.c if (func == 1 && nested)
nested 7282 arch/x86/kvm/vmx/vmx.c WARN_ON_ONCE(vmx->nested.pml_full);
nested 7294 arch/x86/kvm/vmx/vmx.c vmx->nested.pml_full = true;
nested 7556 arch/x86/kvm/vmx/vmx.c if (to_vmx(vcpu)->nested.nested_run_pending)
nested 7565 arch/x86/kvm/vmx/vmx.c vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
nested 7566 arch/x86/kvm/vmx/vmx.c if (vmx->nested.smm.guest_mode)
nested 7569 arch/x86/kvm/vmx/vmx.c vmx->nested.smm.vmxon = vmx->nested.vmxon;
nested 7570 arch/x86/kvm/vmx/vmx.c vmx->nested.vmxon = false;
nested 7580 arch/x86/kvm/vmx/vmx.c if (vmx->nested.smm.vmxon) {
nested 7581 arch/x86/kvm/vmx/vmx.c vmx->nested.vmxon = true;
nested 7582 arch/x86/kvm/vmx/vmx.c vmx->nested.smm.vmxon = false;
nested 7585 arch/x86/kvm/vmx/vmx.c if (vmx->nested.smm.guest_mode) {
nested 7590 arch/x86/kvm/vmx/vmx.c vmx->nested.smm.guest_mode = false;
nested 7607 arch/x86/kvm/vmx/vmx.c return to_vmx(vcpu)->nested.vmxon;
nested 7762 arch/x86/kvm/vmx/vmx.c if (nested) {
nested 7763 arch/x86/kvm/vmx/vmx.c nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
nested 7779 arch/x86/kvm/vmx/vmx.c if (nested)
nested 256 arch/x86/kvm/vmx/vmx.h struct nested_vmx nested;
nested 16 drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h u32 nested;
nested 44 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c '0' + (init->nested - 1) : ' ', ##args); \
nested 2306 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c init->nested++;
nested 2317 drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c init->nested--;
nested 591 drivers/mtd/ubi/wl.c int vol_id, int lnum, int torture, bool nested)
nested 610 drivers/mtd/ubi/wl.c if (nested)
nested 642 drivers/mtd/ubi/wl.c static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
nested 1009 drivers/mtd/ubi/wl.c static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
nested 1056 drivers/mtd/ubi/wl.c if (nested)
nested 3098 drivers/usb/host/fotg210-hcd.c static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
nested 3113 drivers/usb/host/fotg210-hcd.c if (!nested) /* Avoid recursion */
nested 378 fs/ocfs2/journal.c int ret, nested;
nested 383 fs/ocfs2/journal.c nested = handle->h_ref > 1;
nested 388 fs/ocfs2/journal.c if (!nested) {
nested 352 include/linux/lockdep.h extern void lock_release(struct lockdep_map *lock, int nested,
nested 1742 include/net/sock.h int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
nested 1745 include/net/sock.h const int nested)
nested 1747 include/net/sock.h return __sk_receive_skb(sk, skb, nested, 1, true);
nested 1284 kernel/irq/manage.c int ret, nested, shared = 0;
nested 1307 kernel/irq/manage.c nested = irq_settings_is_nested_thread(desc);
nested 1308 kernel/irq/manage.c if (nested) {
nested 1332 kernel/irq/manage.c if (new->thread_fn && !nested) {
nested 4497 kernel/locking/lockdep.c void lock_release(struct lockdep_map *lock, int nested,
nested 6723 kernel/sched/core.c int nested = preempt_count() + rcu_preempt_depth();
nested 6725 kernel/sched/core.c return (nested == preempt_offset);
nested 4960 kernel/trace/ring_buffer.c static __init int rb_write_something(struct rb_test_data *data, bool nested)
nested 4971 kernel/trace/ring_buffer.c cnt = data->cnt + (nested ? 27 : 0);
nested 4986 kernel/trace/ring_buffer.c if (nested)
nested 5003 kernel/trace/ring_buffer.c if (nested) {
nested 499 net/core/sock.c const int nested, unsigned int trim_cap, bool refcounted)
nested 512 net/core/sock.c if (nested)
nested 95 net/netfilter/ipset/ip_set_bitmap_gen.h struct nlattr *nested;
nested 98 net/netfilter/ipset/ip_set_bitmap_gen.h nested = nla_nest_start(skb, IPSET_ATTR_DATA);
nested 99 net/netfilter/ipset/ip_set_bitmap_gen.h if (!nested)
nested 108 net/netfilter/ipset/ip_set_bitmap_gen.h nla_nest_end(skb, nested);
nested 207 net/netfilter/ipset/ip_set_bitmap_gen.h struct nlattr *adt, *nested;
nested 229 net/netfilter/ipset/ip_set_bitmap_gen.h nested = nla_nest_start(skb, IPSET_ATTR_DATA);
nested 230 net/netfilter/ipset/ip_set_bitmap_gen.h if (!nested) {
nested 243 net/netfilter/ipset/ip_set_bitmap_gen.h nla_nest_end(skb, nested);
nested 253 net/netfilter/ipset/ip_set_bitmap_gen.h nla_nest_cancel(skb, nested);
nested 1279 net/netfilter/ipset/ip_set_hash_gen.h struct nlattr *nested;
nested 1292 net/netfilter/ipset/ip_set_hash_gen.h nested = nla_nest_start(skb, IPSET_ATTR_DATA);
nested 1293 net/netfilter/ipset/ip_set_hash_gen.h if (!nested)
nested 1314 net/netfilter/ipset/ip_set_hash_gen.h nla_nest_end(skb, nested);
nested 1351 net/netfilter/ipset/ip_set_hash_gen.h struct nlattr *atd, *nested;
nested 1384 net/netfilter/ipset/ip_set_hash_gen.h nested = nla_nest_start(skb, IPSET_ATTR_DATA);
nested 1385 net/netfilter/ipset/ip_set_hash_gen.h if (!nested) {
nested 1397 net/netfilter/ipset/ip_set_hash_gen.h nla_nest_end(skb, nested);
nested 462 net/netfilter/ipset/ip_set_list_set.c struct nlattr *nested;
nested 465 net/netfilter/ipset/ip_set_list_set.c nested = nla_nest_start(skb, IPSET_ATTR_DATA);
nested 466 net/netfilter/ipset/ip_set_list_set.c if (!nested)
nested 475 net/netfilter/ipset/ip_set_list_set.c nla_nest_end(skb, nested);
nested 487 net/netfilter/ipset/ip_set_list_set.c struct nlattr *atd, *nested;
nested 505 net/netfilter/ipset/ip_set_list_set.c nested = nla_nest_start(skb, IPSET_ATTR_DATA);
nested 506 net/netfilter/ipset/ip_set_list_set.c if (!nested)
nested 513 net/netfilter/ipset/ip_set_list_set.c nla_nest_end(skb, nested);
nested 523 net/netfilter/ipset/ip_set_list_set.c nla_nest_cancel(skb, nested);
nested 2293 net/wireless/nl80211.c struct nlattr *nested;
nested 2295 net/wireless/nl80211.c nested = nla_nest_start_noflag(msg,
nested 2297 net/wireless/nl80211.c if (!nested)
nested 2305 net/wireless/nl80211.c nla_nest_end(msg, nested);
nested 2310 net/wireless/nl80211.c struct nlattr *nested;
nested 2312 net/wireless/nl80211.c nested = nla_nest_start_noflag(msg,
nested 2314 net/wireless/nl80211.c if (!nested)
nested 2322 net/wireless/nl80211.c nla_nest_end(msg, nested);
nested 2347 net/wireless/nl80211.c struct nlattr *nested;
nested 2350 net/wireless/nl80211.c nested = nla_nest_start_noflag(msg,
nested 2352 net/wireless/nl80211.c if (!nested)
nested 2363 net/wireless/nl80211.c nla_nest_end(msg, nested);
nested 2371 net/wireless/nl80211.c struct nlattr *nested_ext_capab, *nested;
nested 2373 net/wireless/nl80211.c nested = nla_nest_start_noflag(msg,
nested 2375 net/wireless/nl80211.c if (!nested)
nested 2401 net/wireless/nl80211.c nla_nest_end(msg, nested);
nested 45 tools/lib/lockdep/include/liblockdep/common.h void lock_release(struct lockdep_map *lock, int nested,
nested 58 tools/perf/bench/epoll-ctl.c static unsigned int nested = 0;
nested 80 tools/perf/bench/epoll-ctl.c OPT_UINTEGER( 'N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)"),
nested 106 tools/perf/bench/epoll-ctl.c if (nested > EPOLL_MAXNESTS)
nested 107 tools/perf/bench/epoll-ctl.c nested = EPOLL_MAXNESTS;
nested 108 tools/perf/bench/epoll-ctl.c printinfo("Nesting level(s): %d\n", nested);
nested 110 tools/perf/bench/epoll-ctl.c epollfdp = calloc(nested, sizeof(int));
nested 114 tools/perf/bench/epoll-ctl.c for (i = 0; i < nested; i++) {
nested 123 tools/perf/bench/epoll-ctl.c for (i = nested - 1; i; i--) {
nested 332 tools/perf/bench/epoll-ctl.c if (nested)
nested 106 tools/perf/bench/epoll-wait.c static unsigned int nested = 0;
nested 139 tools/perf/bench/epoll-wait.c OPT_UINTEGER( 'N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)"),
nested 247 tools/perf/bench/epoll-wait.c if (nested > EPOLL_MAXNESTS)
nested 248 tools/perf/bench/epoll-wait.c nested = EPOLL_MAXNESTS;
nested 250 tools/perf/bench/epoll-wait.c epollfdp = calloc(nested, sizeof(*epollfdp));
nested 254 tools/perf/bench/epoll-wait.c for (i = 0; i < nested; i++) {
nested 263 tools/perf/bench/epoll-wait.c for (i = nested - 1; i; i--) {
nested 319 tools/perf/bench/epoll-wait.c if (nested)
nested 447 tools/perf/bench/epoll-wait.c if (nested)
nested 452 tools/perf/bench/epoll-wait.c printinfo("Nesting level(s): %d\n", nested);
nested 997 tools/testing/selftests/kvm/lib/x86_64/processor.c struct kvm_nested_state nested;
nested 1074 tools/testing/selftests/kvm/lib/x86_64/processor.c state->nested.size = sizeof(state->nested_);
nested 1075 tools/testing/selftests/kvm/lib/x86_64/processor.c r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
nested 1078 tools/testing/selftests/kvm/lib/x86_64/processor.c TEST_ASSERT(state->nested.size <= nested_size,
nested 1080 tools/testing/selftests/kvm/lib/x86_64/processor.c state->nested.size, nested_size);
nested 1082 tools/testing/selftests/kvm/lib/x86_64/processor.c state->nested.size = 0;
nested 1138 tools/testing/selftests/kvm/lib/x86_64/processor.c if (state->nested.size) {
nested 1139 tools/testing/selftests/kvm/lib/x86_64/processor.c r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
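A recurring pattern in the hits above is the per-backend "nested" module parameter that gates nested virtualization (arch/powerpc/kvm/book3s_hv.c:124-126, arch/x86/kvm/svm.c:356-357, arch/x86/kvm/vmx/vmx.c:106-107, arch/s390/kvm/kvm-s390.c:172-174). The fragment below is a minimal, hypothetical out-of-tree module that only reproduces that declaration pattern for illustration; the module name and init/exit hooks are invented and the code is not taken from any of the files listed.

/*
 * Minimal sketch of the module_param() pattern shown in the listing above.
 * Only the three parameter lines mirror the listed KVM code; everything
 * else here is illustrative boilerplate for an out-of-tree build.
 */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>
#include <linux/stat.h>

static bool __read_mostly nested = true;
module_param(nested, bool, S_IRUGO);            /* read-only via sysfs */
MODULE_PARM_DESC(nested, "Enable nested virtualization support");

static int __init nested_param_demo_init(void)
{
	pr_info("nested_param_demo: nested=%d\n", nested);
	return 0;
}

static void __exit nested_param_demo_exit(void)
{
}

module_init(nested_param_demo_init);
module_exit(nested_param_demo_exit);
MODULE_LICENSE("GPL");

Loading such a module with "insmod nested_param_demo.ko nested=0" would disable the feature it guards, which is how the real knobs listed above are typically toggled at load time.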