kvm_x86_ops 1230 arch/x86/include/asm/kvm_host.h extern struct kvm_x86_ops *kvm_x86_ops;
kvm_x86_ops 1236 arch/x86/include/asm/kvm_host.h return kvm_x86_ops->vm_alloc();
kvm_x86_ops 1241 arch/x86/include/asm/kvm_host.h return kvm_x86_ops->vm_free(kvm);
kvm_x86_ops 1247 arch/x86/include/asm/kvm_host.h if (kvm_x86_ops->tlb_remote_flush &&
kvm_x86_ops 1248 arch/x86/include/asm/kvm_host.h !kvm_x86_ops->tlb_remote_flush(kvm))
kvm_x86_ops 1618 arch/x86/include/asm/kvm_host.h if (kvm_x86_ops->vcpu_blocking)
kvm_x86_ops 1619 arch/x86/include/asm/kvm_host.h kvm_x86_ops->vcpu_blocking(vcpu);
kvm_x86_ops 1624 arch/x86/include/asm/kvm_host.h if (kvm_x86_ops->vcpu_unblocking)
kvm_x86_ops 1625 arch/x86/include/asm/kvm_host.h kvm_x86_ops->vcpu_unblocking(vcpu);

kvm_x86_ops 51 arch/x86/kvm/cpuid.c && kvm_x86_ops->mpx_supported());
kvm_x86_ops 235 arch/x86/kvm/cpuid.c kvm_x86_ops->cpuid_update(vcpu);
kvm_x86_ops 258 arch/x86/kvm/cpuid.c kvm_x86_ops->cpuid_update(vcpu);
kvm_x86_ops 350 arch/x86/kvm/cpuid.c unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
kvm_x86_ops 352 arch/x86/kvm/cpuid.c unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
kvm_x86_ops 353 arch/x86/kvm/cpuid.c unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
kvm_x86_ops 355 arch/x86/kvm/cpuid.c unsigned f_pku = kvm_x86_ops->pku_supported() ? F(PKU) : 0;
kvm_x86_ops 438 arch/x86/kvm/cpuid.c unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
kvm_x86_ops 445 arch/x86/kvm/cpuid.c unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
kvm_x86_ops 446 arch/x86/kvm/cpuid.c unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
kvm_x86_ops 447 arch/x86/kvm/cpuid.c unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
kvm_x86_ops 804 arch/x86/kvm/cpuid.c kvm_x86_ops->set_supported_cpuid(function, entry);

kvm_x86_ops 1023 arch/x86/kvm/hyperv.c kvm_x86_ops->patch_hypercall(vcpu, instructions);
kvm_x86_ops 1608 arch/x86/kvm/hyperv.c if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
kvm_x86_ops 1801 arch/x86/kvm/hyperv.c if (kvm_x86_ops->nested_get_evmcs_version)
kvm_x86_ops 1802 arch/x86/kvm/hyperv.c evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);

kvm_x86_ops 44 arch/x86/kvm/kvm_cache_regs.h kvm_x86_ops->cache_reg(vcpu, reg);
kvm_x86_ops 84 arch/x86/kvm/kvm_cache_regs.h kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);
kvm_x86_ops 93 arch/x86/kvm/kvm_cache_regs.h kvm_x86_ops->decache_cr0_guest_bits(vcpu);
kvm_x86_ops 106 arch/x86/kvm/kvm_cache_regs.h kvm_x86_ops->decache_cr4_guest_bits(vcpu);
kvm_x86_ops 113 arch/x86/kvm/kvm_cache_regs.h kvm_x86_ops->decache_cr3(vcpu);

kvm_x86_ops 454 arch/x86/kvm/lapic.c kvm_x86_ops->hwapic_irr_update(vcpu,
kvm_x86_ops 479 arch/x86/kvm/lapic.c kvm_x86_ops->hwapic_isr_update(vcpu, vec);
kvm_x86_ops 527 arch/x86/kvm/lapic.c kvm_x86_ops->hwapic_isr_update(vcpu,
kvm_x86_ops 672 arch/x86/kvm/lapic.c highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
kvm_x86_ops 1061 arch/x86/kvm/lapic.c if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
kvm_x86_ops 1703 arch/x86/kvm/lapic.c kvm_x86_ops->cancel_hv_timer(apic->vcpu);
kvm_x86_ops 1714 arch/x86/kvm/lapic.c if (!kvm_x86_ops->set_hv_timer)
kvm_x86_ops 1720 arch/x86/kvm/lapic.c if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
kvm_x86_ops 2145 arch/x86/kvm/lapic.c kvm_x86_ops->set_virtual_apic_mode(vcpu);
kvm_x86_ops 2208 arch/x86/kvm/lapic.c kvm_x86_ops->apicv_post_state_restore(vcpu);
kvm_x86_ops 2209 arch/x86/kvm/lapic.c kvm_x86_ops->hwapic_irr_update(vcpu, -1);
kvm_x86_ops 2210 arch/x86/kvm/lapic.c kvm_x86_ops->hwapic_isr_update(vcpu, -1);
kvm_x86_ops 2461 arch/x86/kvm/lapic.c kvm_x86_ops->apicv_post_state_restore(vcpu);
kvm_x86_ops 2462 arch/x86/kvm/lapic.c kvm_x86_ops->hwapic_irr_update(vcpu,
kvm_x86_ops 2464 arch/x86/kvm/lapic.c kvm_x86_ops->hwapic_isr_update(vcpu,
kvm_x86_ops 2716 arch/x86/kvm/lapic.c if (is_smm(vcpu) || kvm_x86_ops->apic_init_signal_blocked(vcpu)) {

kvm_x86_ops 316 arch/x86/kvm/mmu.c return kvm_x86_ops->tlb_remote_flush_with_range;
kvm_x86_ops 324 arch/x86/kvm/mmu.c if (range && kvm_x86_ops->tlb_remote_flush_with_range)
kvm_x86_ops 325 arch/x86/kvm/mmu.c ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
kvm_x86_ops 1350 arch/x86/kvm/mmu.c max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
kvm_x86_ops 1808 arch/x86/kvm/mmu.c if (kvm_x86_ops->enable_log_dirty_pt_masked)
kvm_x86_ops 1809 arch/x86/kvm/mmu.c kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
kvm_x86_ops 1824 arch/x86/kvm/mmu.c if (kvm_x86_ops->write_log_dirty)
kvm_x86_ops 1825 arch/x86/kvm/mmu.c return kvm_x86_ops->write_log_dirty(vcpu);
kvm_x86_ops 3093 arch/x86/kvm/mmu.c spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
kvm_x86_ops 4414 arch/x86/kvm/mmu.c kvm_x86_ops->tlb_flush(vcpu, true);
kvm_x86_ops 5009 arch/x86/kvm/mmu.c role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
kvm_x86_ops 5031 arch/x86/kvm/mmu.c context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
kvm_x86_ops 5033 arch/x86/kvm/mmu.c context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
kvm_x86_ops 5181 arch/x86/kvm/mmu.c context->set_cr3 = kvm_x86_ops->set_cr3;
kvm_x86_ops 5288 arch/x86/kvm/mmu.c kvm_x86_ops->tlb_flush(vcpu, true);
kvm_x86_ops 5601 arch/x86/kvm/mmu.c if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
kvm_x86_ops 5636 arch/x86/kvm/mmu.c kvm_x86_ops->tlb_flush_gva(vcpu, gva);
kvm_x86_ops 5661 arch/x86/kvm/mmu.c kvm_x86_ops->tlb_flush_gva(vcpu, gva);
kvm_x86_ops 5778 arch/x86/kvm/mmu.c if (tdp_enabled && kvm_x86_ops->get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)

kvm_x86_ops 160 arch/x86/kvm/mmu.h int cpl = kvm_x86_ops->get_cpl(vcpu);
kvm_x86_ops 161 arch/x86/kvm/mmu.h unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

kvm_x86_ops 186 arch/x86/kvm/pmu.c config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
kvm_x86_ops 228 arch/x86/kvm/pmu.c kvm_x86_ops->pmu_ops->find_fixed_event(idx),
kvm_x86_ops 237 arch/x86/kvm/pmu.c struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
kvm_x86_ops 262 arch/x86/kvm/pmu.c struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
kvm_x86_ops 276 arch/x86/kvm/pmu.c return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
kvm_x86_ops 326 arch/x86/kvm/pmu.c pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
kvm_x86_ops 342 arch/x86/kvm/pmu.c return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
kvm_x86_ops 347 arch/x86/kvm/pmu.c return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
kvm_x86_ops 352 arch/x86/kvm/pmu.c return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
kvm_x86_ops 361 arch/x86/kvm/pmu.c kvm_x86_ops->pmu_ops->refresh(vcpu);
kvm_x86_ops 369 arch/x86/kvm/pmu.c kvm_x86_ops->pmu_ops->reset(vcpu);
kvm_x86_ops 377 arch/x86/kvm/pmu.c kvm_x86_ops->pmu_ops->init(vcpu);

kvm_x86_ops 81 arch/x86/kvm/pmu.h return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);

kvm_x86_ops 7237 arch/x86/kvm/svm.c static struct kvm_x86_ops svm_x86_ops __ro_after_init = {

kvm_x86_ops 243 arch/x86/kvm/trace.h kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
kvm_x86_ops 747 arch/x86/kvm/trace.h __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);

kvm_x86_ops 6068 arch/x86/kvm/vmx/nested.c kvm_x86_ops->check_nested_events = vmx_check_nested_events;
kvm_x86_ops 6069 arch/x86/kvm/vmx/nested.c kvm_x86_ops->get_nested_state = vmx_get_nested_state;
kvm_x86_ops 6070 arch/x86/kvm/vmx/nested.c kvm_x86_ops->set_nested_state = vmx_set_nested_state;
kvm_x86_ops 6071 arch/x86/kvm/vmx/nested.c kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages,
kvm_x86_ops 6072 arch/x86/kvm/vmx/nested.c kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
kvm_x86_ops 6073 arch/x86/kvm/vmx/nested.c kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;

kvm_x86_ops 319 arch/x86/kvm/vmx/pmu_intel.c if (kvm_x86_ops->pt_supported())

kvm_x86_ops 2982 arch/x86/kvm/vmx/vmx.c if (kvm_x86_ops->tlb_remote_flush) {
kvm_x86_ops 7443 arch/x86/kvm/vmx/vmx.c if (kvm_x86_ops->set_hv_timer)
kvm_x86_ops 7666 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->set_apic_access_page_addr = NULL;
kvm_x86_ops 7669 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->update_cr8_intercept = NULL;
kvm_x86_ops 7677 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
kvm_x86_ops 7678 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->tlb_remote_flush_with_range =
kvm_x86_ops 7693 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->sync_pir_to_irr = NULL;
kvm_x86_ops 7717 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->slot_enable_log_dirty = NULL;
kvm_x86_ops 7718 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->slot_disable_log_dirty = NULL;
kvm_x86_ops 7719 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->flush_log_dirty = NULL;
kvm_x86_ops 7720 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
kvm_x86_ops 7748 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->set_hv_timer = NULL;
kvm_x86_ops 7749 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->cancel_hv_timer = NULL;
kvm_x86_ops 7750 arch/x86/kvm/vmx/vmx.c kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
kvm_x86_ops 7785 arch/x86/kvm/vmx/vmx.c static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {

kvm_x86_ops 110 arch/x86/kvm/x86.c struct kvm_x86_ops *kvm_x86_ops __read_mostly;
kvm_x86_ops 111 arch/x86/kvm/x86.c EXPORT_SYMBOL_GPL(kvm_x86_ops);
kvm_x86_ops 641 arch/x86/kvm/x86.c if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
kvm_x86_ops 788 arch/x86/kvm/x86.c kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
kvm_x86_ops 801 arch/x86/kvm/x86.c kvm_x86_ops->set_cr0(vcpu, cr0);
kvm_x86_ops 904 arch/x86/kvm/x86.c if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
kvm_x86_ops 936 arch/x86/kvm/x86.c if (!cpu_has(c, X86_FEATURE_UMIP) && !kvm_x86_ops->umip_emulated())
kvm_x86_ops 998 arch/x86/kvm/x86.c if (kvm_x86_ops->set_cr4(vcpu, cr4))
kvm_x86_ops 1082 arch/x86/kvm/x86.c kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
kvm_x86_ops 1093 arch/x86/kvm/x86.c kvm_x86_ops->set_dr7(vcpu, dr7);
kvm_x86_ops 1163 arch/x86/kvm/x86.c *val = kvm_x86_ops->get_dr6(vcpu);
kvm_x86_ops 1410 arch/x86/kvm/x86.c if (kvm_x86_ops->get_msr_feature(msr))
kvm_x86_ops 1478 arch/x86/kvm/x86.c kvm_x86_ops->set_efer(vcpu, efer);
kvm_x86_ops 1534 arch/x86/kvm/x86.c return kvm_x86_ops->set_msr(vcpu, &msr);
kvm_x86_ops 1552 arch/x86/kvm/x86.c ret = kvm_x86_ops->get_msr(vcpu, &msr);
kvm_x86_ops 1873 arch/x86/kvm/x86.c u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
kvm_x86_ops 1915 arch/x86/kvm/x86.c u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
kvm_x86_ops 1923 arch/x86/kvm/x86.c vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
kvm_x86_ops 2047 arch/x86/kvm/x86.c u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
kvm_x86_ops 2644 arch/x86/kvm/x86.c kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
kvm_x86_ops 3344 arch/x86/kvm/x86.c r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
kvm_x86_ops 3347 arch/x86/kvm/x86.c r = !kvm_x86_ops->cpu_has_accelerated_tpr();
kvm_x86_ops 3374 arch/x86/kvm/x86.c r = kvm_x86_ops->get_nested_state ?
kvm_x86_ops 3375 arch/x86/kvm/x86.c kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0;
kvm_x86_ops 3378 arch/x86/kvm/x86.c r = kvm_x86_ops->enable_direct_tlbflush != NULL;
kvm_x86_ops 3381 arch/x86/kvm/x86.c r = kvm_x86_ops->nested_enable_evmcs != NULL;
kvm_x86_ops 3497 arch/x86/kvm/x86.c if (kvm_x86_ops->has_wbinvd_exit())
kvm_x86_ops 3504 arch/x86/kvm/x86.c kvm_x86_ops->vcpu_load(vcpu, cpu);
kvm_x86_ops 3571 arch/x86/kvm/x86.c vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
kvm_x86_ops 3590 arch/x86/kvm/x86.c kvm_x86_ops->vcpu_put(vcpu);
kvm_x86_ops 3604 arch/x86/kvm/x86.c kvm_x86_ops->sync_pir_to_irr(vcpu);
kvm_x86_ops 3712 arch/x86/kvm/x86.c kvm_x86_ops->setup_mce(vcpu);
kvm_x86_ops 3801 arch/x86/kvm/x86.c events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
kvm_x86_ops 3805 arch/x86/kvm/x86.c events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
kvm_x86_ops 3872 arch/x86/kvm/x86.c kvm_x86_ops->set_interrupt_shadow(vcpu,
kvm_x86_ops 3878 arch/x86/kvm/x86.c kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
kvm_x86_ops 4152 arch/x86/kvm/x86.c if (!kvm_x86_ops->nested_enable_evmcs)
kvm_x86_ops 4154 arch/x86/kvm/x86.c r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);
kvm_x86_ops 4163 arch/x86/kvm/x86.c if (!kvm_x86_ops->enable_direct_tlbflush)
kvm_x86_ops 4166 arch/x86/kvm/x86.c return kvm_x86_ops->enable_direct_tlbflush(vcpu);
kvm_x86_ops 4469 arch/x86/kvm/x86.c if (!kvm_x86_ops->get_nested_state)
kvm_x86_ops 4477 arch/x86/kvm/x86.c r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
kvm_x86_ops 4499 arch/x86/kvm/x86.c if (!kvm_x86_ops->set_nested_state)
kvm_x86_ops 4521 arch/x86/kvm/x86.c r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
kvm_x86_ops 4565 arch/x86/kvm/x86.c ret = kvm_x86_ops->set_tss_addr(kvm, addr);
kvm_x86_ops 4572 arch/x86/kvm/x86.c return kvm_x86_ops->set_identity_map_addr(kvm, ident_addr);
kvm_x86_ops 4756 arch/x86/kvm/x86.c if (kvm_x86_ops->flush_log_dirty)
kvm_x86_ops 4757 arch/x86/kvm/x86.c kvm_x86_ops->flush_log_dirty(kvm);
kvm_x86_ops 4783 arch/x86/kvm/x86.c if (kvm_x86_ops->flush_log_dirty)
kvm_x86_ops 4784 arch/x86/kvm/x86.c kvm_x86_ops->flush_log_dirty(kvm);
kvm_x86_ops 5150 arch/x86/kvm/x86.c if (kvm_x86_ops->mem_enc_op)
kvm_x86_ops 5151 arch/x86/kvm/x86.c r = kvm_x86_ops->mem_enc_op(kvm, argp);
kvm_x86_ops 5162 arch/x86/kvm/x86.c if (kvm_x86_ops->mem_enc_reg_region)
kvm_x86_ops 5163 arch/x86/kvm/x86.c r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
kvm_x86_ops 5174 arch/x86/kvm/x86.c if (kvm_x86_ops->mem_enc_unreg_region)
kvm_x86_ops 5175 arch/x86/kvm/x86.c r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
kvm_x86_ops 5226 arch/x86/kvm/x86.c if (!kvm_x86_ops->rdtscp_supported())
kvm_x86_ops 5231 arch/x86/kvm/x86.c if (!kvm_x86_ops->pt_supported())
kvm_x86_ops 5235 arch/x86/kvm/x86.c if (!kvm_x86_ops->pt_supported() ||
kvm_x86_ops 5241 arch/x86/kvm/x86.c if (!kvm_x86_ops->pt_supported() ||
kvm_x86_ops 5247 arch/x86/kvm/x86.c if (!kvm_x86_ops->pt_supported() ||
kvm_x86_ops 5270 arch/x86/kvm/x86.c if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i]))
kvm_x86_ops 5333 arch/x86/kvm/x86.c kvm_x86_ops->set_segment(vcpu, var, seg);
kvm_x86_ops 5339 arch/x86/kvm/x86.c kvm_x86_ops->get_segment(vcpu, var, seg);
kvm_x86_ops 5359 arch/x86/kvm/x86.c u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
kvm_x86_ops 5366 arch/x86/kvm/x86.c u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
kvm_x86_ops 5374 arch/x86/kvm/x86.c u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
kvm_x86_ops 5423 arch/x86/kvm/x86.c u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
kvm_x86_ops 5448 arch/x86/kvm/x86.c u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
kvm_x86_ops 5469 arch/x86/kvm/x86.c if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
kvm_x86_ops 5522 arch/x86/kvm/x86.c if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
kvm_x86_ops 5584 arch/x86/kvm/x86.c u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
kvm_x86_ops 5972 arch/x86/kvm/x86.c return kvm_x86_ops->get_segment_base(vcpu, seg);
kvm_x86_ops 5985 arch/x86/kvm/x86.c if (kvm_x86_ops->has_wbinvd_exit()) {
kvm_x86_ops 6090 arch/x86/kvm/x86.c return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
kvm_x86_ops 6095 arch/x86/kvm/x86.c kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
kvm_x86_ops 6100 arch/x86/kvm/x86.c kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
kvm_x86_ops 6105 arch/x86/kvm/x86.c kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
kvm_x86_ops 6110 arch/x86/kvm/x86.c kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
kvm_x86_ops 6232 arch/x86/kvm/x86.c return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
kvm_x86_ops 6253 arch/x86/kvm/x86.c kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
kvm_x86_ops 6269 arch/x86/kvm/x86.c return kvm_x86_ops->pre_leave_smm(emul_to_vcpu(ctxt), smstate);
kvm_x86_ops 6328 arch/x86/kvm/x86.c u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
kvm_x86_ops 6339 arch/x86/kvm/x86.c kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
kvm_x86_ops 6364 arch/x86/kvm/x86.c kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
kvm_x86_ops 6424 arch/x86/kvm/x86.c if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
kvm_x86_ops 6603 arch/x86/kvm/x86.c unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
kvm_x86_ops 6606 arch/x86/kvm/x86.c r = kvm_x86_ops->skip_emulated_instruction(vcpu);
kvm_x86_ops 6830 arch/x86/kvm/x86.c unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
kvm_x86_ops 7166 arch/x86/kvm/x86.c user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
kvm_x86_ops 7244 arch/x86/kvm/x86.c struct kvm_x86_ops *ops = opaque;
kvm_x86_ops 7246 arch/x86/kvm/x86.c if (kvm_x86_ops) {
kvm_x86_ops 7293 arch/x86/kvm/x86.c kvm_x86_ops = ops;
kvm_x86_ops 7341 arch/x86/kvm/x86.c kvm_x86_ops = NULL;
kvm_x86_ops 7430 arch/x86/kvm/x86.c kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
kvm_x86_ops 7475 arch/x86/kvm/x86.c if (kvm_x86_ops->get_cpl(vcpu) != 0) {
kvm_x86_ops 7521 arch/x86/kvm/x86.c kvm_x86_ops->patch_hypercall(vcpu, instruction);
kvm_x86_ops 7550 arch/x86/kvm/x86.c if (!kvm_x86_ops->update_cr8_intercept)
kvm_x86_ops 7569 arch/x86/kvm/x86.c kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
kvm_x86_ops 7579 arch/x86/kvm/x86.c kvm_x86_ops->queue_exception(vcpu);
kvm_x86_ops 7596 arch/x86/kvm/x86.c kvm_x86_ops->set_nmi(vcpu);
kvm_x86_ops 7598 arch/x86/kvm/x86.c kvm_x86_ops->set_irq(vcpu);
kvm_x86_ops 7607 arch/x86/kvm/x86.c if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
kvm_x86_ops 7608 arch/x86/kvm/x86.c r = kvm_x86_ops->check_nested_events(vcpu);
kvm_x86_ops 7645 arch/x86/kvm/x86.c kvm_x86_ops->queue_exception(vcpu);
kvm_x86_ops 7653 arch/x86/kvm/x86.c kvm_x86_ops->smi_allowed(vcpu)) {
kvm_x86_ops 7657 arch/x86/kvm/x86.c } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
kvm_x86_ops 7660 arch/x86/kvm/x86.c kvm_x86_ops->set_nmi(vcpu);
kvm_x86_ops 7669 arch/x86/kvm/x86.c if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
kvm_x86_ops 7670 arch/x86/kvm/x86.c r = kvm_x86_ops->check_nested_events(vcpu);
kvm_x86_ops 7674 arch/x86/kvm/x86.c if (kvm_x86_ops->interrupt_allowed(vcpu)) {
kvm_x86_ops 7677 arch/x86/kvm/x86.c kvm_x86_ops->set_irq(vcpu);
kvm_x86_ops 7693 arch/x86/kvm/x86.c if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
kvm_x86_ops 7783 arch/x86/kvm/x86.c kvm_x86_ops->get_gdt(vcpu, &dt);
kvm_x86_ops 7787 arch/x86/kvm/x86.c kvm_x86_ops->get_idt(vcpu, &dt);
kvm_x86_ops 7837 arch/x86/kvm/x86.c kvm_x86_ops->get_idt(vcpu, &dt);
kvm_x86_ops 7847 arch/x86/kvm/x86.c kvm_x86_ops->get_gdt(vcpu, &dt);
kvm_x86_ops 7877 arch/x86/kvm/x86.c kvm_x86_ops->pre_enter_smm(vcpu, buf);
kvm_x86_ops 7882 arch/x86/kvm/x86.c if (kvm_x86_ops->get_nmi_mask(vcpu))
kvm_x86_ops 7885 arch/x86/kvm/x86.c kvm_x86_ops->set_nmi_mask(vcpu, true);
kvm_x86_ops 7891 arch/x86/kvm/x86.c kvm_x86_ops->set_cr0(vcpu, cr0);
kvm_x86_ops 7894 arch/x86/kvm/x86.c kvm_x86_ops->set_cr4(vcpu, 0);
kvm_x86_ops 7898 arch/x86/kvm/x86.c kvm_x86_ops->set_idt(vcpu, &dt);
kvm_x86_ops 7929 arch/x86/kvm/x86.c kvm_x86_ops->set_efer(vcpu, 0);
kvm_x86_ops 7958 arch/x86/kvm/x86.c kvm_x86_ops->sync_pir_to_irr(vcpu);
kvm_x86_ops 7978 arch/x86/kvm/x86.c kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
kvm_x86_ops 8002 arch/x86/kvm/x86.c if (!kvm_x86_ops->set_apic_access_page_addr)
kvm_x86_ops 8008 arch/x86/kvm/x86.c kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
kvm_x86_ops 8040 arch/x86/kvm/x86.c if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
kvm_x86_ops 8162 arch/x86/kvm/x86.c if (!kvm_x86_ops->enable_smi_window(vcpu))
kvm_x86_ops 8165 arch/x86/kvm/x86.c kvm_x86_ops->enable_nmi_window(vcpu);
kvm_x86_ops 8167 arch/x86/kvm/x86.c kvm_x86_ops->enable_irq_window(vcpu);
kvm_x86_ops 8184 arch/x86/kvm/x86.c kvm_x86_ops->prepare_guest_switch(vcpu);
kvm_x86_ops 8215 arch/x86/kvm/x86.c kvm_x86_ops->sync_pir_to_irr(vcpu);
kvm_x86_ops 8230 arch/x86/kvm/x86.c kvm_x86_ops->request_immediate_exit(vcpu);
kvm_x86_ops 8253 arch/x86/kvm/x86.c kvm_x86_ops->run(vcpu);
kvm_x86_ops 8263 arch/x86/kvm/x86.c kvm_x86_ops->sync_dirty_debug_regs(vcpu);
kvm_x86_ops 8285 arch/x86/kvm/x86.c kvm_x86_ops->handle_exit_irqoff(vcpu);
kvm_x86_ops 8329 arch/x86/kvm/x86.c r = kvm_x86_ops->handle_exit(vcpu);
kvm_x86_ops 8333 arch/x86/kvm/x86.c kvm_x86_ops->cancel_injection(vcpu);
kvm_x86_ops 8343 arch/x86/kvm/x86.c (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) {
kvm_x86_ops 8348 arch/x86/kvm/x86.c if (kvm_x86_ops->post_block)
kvm_x86_ops 8349 arch/x86/kvm/x86.c kvm_x86_ops->post_block(vcpu);
kvm_x86_ops 8376 arch/x86/kvm/x86.c if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
kvm_x86_ops 8377 arch/x86/kvm/x86.c kvm_x86_ops->check_nested_events(vcpu);
kvm_x86_ops 8736 arch/x86/kvm/x86.c kvm_x86_ops->get_idt(vcpu, &dt);
kvm_x86_ops 8739 arch/x86/kvm/x86.c kvm_x86_ops->get_gdt(vcpu, &dt);
kvm_x86_ops 8883 arch/x86/kvm/x86.c kvm_x86_ops->set_idt(vcpu, &dt);
kvm_x86_ops 8886 arch/x86/kvm/x86.c kvm_x86_ops->set_gdt(vcpu, &dt);
kvm_x86_ops 8896 arch/x86/kvm/x86.c kvm_x86_ops->set_efer(vcpu, sregs->efer);
kvm_x86_ops 8899 arch/x86/kvm/x86.c kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
kvm_x86_ops 8905 arch/x86/kvm/x86.c kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
kvm_x86_ops 9011 arch/x86/kvm/x86.c kvm_x86_ops->update_bp_intercept(vcpu);
kvm_x86_ops 9148 arch/x86/kvm/x86.c kvm_x86_ops->vcpu_free(vcpu);
kvm_x86_ops 9162 arch/x86/kvm/x86.c vcpu = kvm_x86_ops->vcpu_create(kvm, id);
kvm_x86_ops 9282 arch/x86/kvm/x86.c kvm_x86_ops->vcpu_reset(vcpu, init_event);
kvm_x86_ops 9307 arch/x86/kvm/x86.c ret = kvm_x86_ops->hardware_enable();
kvm_x86_ops 9389 arch/x86/kvm/x86.c kvm_x86_ops->hardware_disable();
kvm_x86_ops 9397 arch/x86/kvm/x86.c r = kvm_x86_ops->hardware_setup();
kvm_x86_ops 9423 arch/x86/kvm/x86.c kvm_x86_ops->hardware_unsetup();
kvm_x86_ops 9428 arch/x86/kvm/x86.c return kvm_x86_ops->check_processor_compatibility();
kvm_x86_ops 9470 arch/x86/kvm/x86.c vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
kvm_x86_ops 9540 arch/x86/kvm/x86.c kvm_x86_ops->sched_in(vcpu, cpu);
kvm_x86_ops 9577 arch/x86/kvm/x86.c return kvm_x86_ops->vm_init(kvm);
kvm_x86_ops 9703 arch/x86/kvm/x86.c if (kvm_x86_ops->vm_destroy)
kvm_x86_ops 9704 arch/x86/kvm/x86.c kvm_x86_ops->vm_destroy(kvm);
kvm_x86_ops 9878 arch/x86/kvm/x86.c if (kvm_x86_ops->slot_enable_log_dirty)
kvm_x86_ops 9879 arch/x86/kvm/x86.c kvm_x86_ops->slot_enable_log_dirty(kvm, new);
kvm_x86_ops 9883 arch/x86/kvm/x86.c if (kvm_x86_ops->slot_disable_log_dirty)
kvm_x86_ops 9884 arch/x86/kvm/x86.c kvm_x86_ops->slot_disable_log_dirty(kvm, new);
kvm_x86_ops 9948 arch/x86/kvm/x86.c kvm_x86_ops->guest_apic_has_interrupt &&
kvm_x86_ops 9949 arch/x86/kvm/x86.c kvm_x86_ops->guest_apic_has_interrupt(vcpu));
kvm_x86_ops 9968 arch/x86/kvm/x86.c kvm_x86_ops->nmi_allowed(vcpu)))
kvm_x86_ops 10001 arch/x86/kvm/x86.c if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
kvm_x86_ops 10019 arch/x86/kvm/x86.c return kvm_x86_ops->interrupt_allowed(vcpu);
kvm_x86_ops 10041 arch/x86/kvm/x86.c rflags = kvm_x86_ops->get_rflags(vcpu);
kvm_x86_ops 10053 arch/x86/kvm/x86.c kvm_x86_ops->set_rflags(vcpu, rflags);
kvm_x86_ops 10164 arch/x86/kvm/x86.c kvm_x86_ops->get_cpl(vcpu) == 0))
kvm_x86_ops 10184 arch/x86/kvm/x86.c return kvm_x86_ops->interrupt_allowed(vcpu);
kvm_x86_ops 10313 arch/x86/kvm/x86.c return kvm_x86_ops->update_pi_irte(irqfd->kvm,
kvm_x86_ops 10333 arch/x86/kvm/x86.c ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0);
kvm_x86_ops 10342 arch/x86/kvm/x86.c return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);

kvm_x86_ops 99 arch/x86/kvm/x86.h kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
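Read as a whole, the listing traces the kvm_x86_ops mechanism of this kernel generation end to end: x86.c:110-111 define and export the single global struct kvm_x86_ops *kvm_x86_ops; the vendor modules provide filled-in tables (svm_x86_ops at svm.c:7237, vmx_x86_ops at vmx.c:7785) that kvm_arch_init installs (kvm_x86_ops = ops; at x86.c:7293) and tears down (x86.c:7341); common code then dispatches through the pointer. Mandatory hooks such as ->run (x86.c:8253) are called unconditionally, while optional hooks are NULL-checked at every call site (e.g. ->flush_log_dirty at x86.c:4756) or used as capability probes (x86.c:3378); VMX clears hooks the hardware cannot support (vmx.c:7666-7750), and nested.c:6068-6073 patches in the nested-virtualization hooks at setup time. The stand-alone C sketch below models only that dispatch pattern; the struct layout, hook names, and both "vendor" tables are illustrative stand-ins, not the kernel's definitions.

/* Minimal user-space sketch of the kvm_x86_ops-style ops-table pattern.
 * All types and names here are hypothetical, not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct vcpu;                          /* opaque in this sketch */

struct x86_ops {
	/* mandatory hook: every backend must provide it */
	void (*run)(struct vcpu *vcpu);
	/* optional hook: may be NULL, so callers must check */
	void (*flush_log_dirty)(struct vcpu *vcpu);
};

/* Single global pointer, installed once at init time
 * (the kernel analogue is x86.c:110 and x86.c:7293). */
static struct x86_ops *x86_ops;

static void vendor_a_run(struct vcpu *vcpu)   { (void)vcpu; puts("A: run"); }
static void vendor_a_flush(struct vcpu *vcpu) { (void)vcpu; puts("A: flush dirty log"); }
static void vendor_b_run(struct vcpu *vcpu)   { (void)vcpu; puts("B: run"); }

/* Backend A implements both hooks... */
static struct x86_ops vendor_a_ops = {
	.run             = vendor_a_run,
	.flush_log_dirty = vendor_a_flush,
};

/* ...backend B leaves the optional hook NULL. */
static struct x86_ops vendor_b_ops = {
	.run             = vendor_b_run,
	.flush_log_dirty = NULL,
};

/* Common code: the mandatory hook is called directly, the optional
 * hook is guarded, mirroring call sites like x86.c:4756-4757. */
static void common_dispatch(struct vcpu *vcpu)
{
	x86_ops->run(vcpu);
	if (x86_ops->flush_log_dirty)
		x86_ops->flush_log_dirty(vcpu);
}

int main(void)
{
	x86_ops = &vendor_a_ops;      /* cf. kvm_x86_ops = ops; */
	common_dispatch(NULL);

	x86_ops = &vendor_b_ops;      /* optional hook silently skipped */
	common_dispatch(NULL);
	return 0;
}

The NULL-as-absent convention is what lets one common core serve backends with different feature sets: a hook left at (or reset to) NULL simultaneously disables its call sites and makes the capability queryable, exactly as the kvm_x86_ops->enable_direct_tlbflush != NULL test at x86.c:3378 does.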