Lines matching refs: kvm_x86_ops
96 struct kvm_x86_ops *kvm_x86_ops __read_mostly;
97 EXPORT_SYMBOL_GPL(kvm_x86_ops);
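Every reference below dispatches through this one __read_mostly global: kvm_x86_ops is a table of function pointers that the vendor module (vmx.c or svm.c) fills in, and the arch-neutral code calls through it, NULL-checking the hooks that are optional. A minimal sketch of the pattern, using hypothetical demo_* names rather than the kernel's real struct (which lives in arch/x86/include/asm/kvm_host.h and has dozens of members):

    #include <stdbool.h>

    /* Hypothetical, cut-down stand-in for struct kvm_x86_ops. */
    struct demo_ops {
        int  (*get_cpl)(void *vcpu);         /* mandatory: always implemented */
        void (*flush_log_dirty)(void *kvm);  /* optional: may be left NULL */
    };

    /* Single global, set once at module load (cf. line 96). */
    static struct demo_ops *demo_x86_ops;

    static bool demo_require_cpl(void *vcpu, int required_cpl)
    {
        /* Mandatory hooks are called unconditionally (cf. line 469). */
        return demo_x86_ops->get_cpl(vcpu) <= required_cpl;
    }

    static void demo_flush_dirty(void *kvm)
    {
        /* Optional hooks are NULL-checked first, exactly like the
         * flush_log_dirty call site at lines 3660-3661. */
        if (demo_x86_ops->flush_log_dirty)
            demo_x86_ops->flush_log_dirty(kvm);
    }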
422 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_complete_insn_gp()
469 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
610 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
623 kvm_x86_ops->set_cr0(vcpu, cr0); in kvm_set_cr0()
709 if (kvm_x86_ops->get_cpl(vcpu) != 0 || in kvm_set_xcr()
757 if (kvm_x86_ops->set_cr4(vcpu, cr4)) in kvm_set_cr4()
832 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); in kvm_update_dr6()
843 kvm_x86_ops->set_dr7(vcpu, dr7); in kvm_update_dr7()
909 *val = kvm_x86_ops->get_dr6(vcpu); in kvm_get_dr()
1020 kvm_x86_ops->set_efer(vcpu, efer); in set_efer()
1067 return kvm_x86_ops->set_msr(vcpu, msr); in kvm_set_msr()
1364 u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); in update_ia32_tsc_adjust_msr()
1406 return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc)); in kvm_read_l1_tsc()
1519 kvm_x86_ops->write_tsc_offset(vcpu, offset); in kvm_write_tsc()
1538 kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_guest()
1546 kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_host()
2245 return kvm_x86_ops->get_msr(vcpu, msr); in kvm_get_msr()
2573 r = kvm_x86_ops->cpu_has_high_real_mode_segbase(); in kvm_vm_ioctl_check_extension()
2579 r = !kvm_x86_ops->cpu_has_accelerated_tpr(); in kvm_vm_ioctl_check_extension()
2699 if (kvm_x86_ops->has_wbinvd_exit()) in kvm_arch_vcpu_load()
2706 kvm_x86_ops->vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
2723 kvm_x86_ops->write_tsc_offset(vcpu, offset); in kvm_arch_vcpu_load()
2743 kvm_x86_ops->vcpu_put(vcpu); in kvm_arch_vcpu_put()
2751 kvm_x86_ops->sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
2923 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2927 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2963 kvm_x86_ops->set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
2969 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3482 ret = kvm_x86_ops->set_tss_addr(kvm, addr); in kvm_vm_ioctl_set_tss_addr()
3660 if (kvm_x86_ops->flush_log_dirty) in kvm_vm_ioctl_get_dirty_log()
3661 kvm_x86_ops->flush_log_dirty(kvm); in kvm_vm_ioctl_get_dirty_log()
4018 if (!kvm_x86_ops->mpx_supported()) in kvm_init_msr_list()
4022 if (!kvm_x86_ops->rdtscp_supported()) in kvm_init_msr_list()
4038 if (!kvm_x86_ops->cpu_has_high_real_mode_segbase()) in kvm_init_msr_list()
4098 kvm_x86_ops->set_segment(vcpu, var, seg); in kvm_set_segment()
4104 kvm_x86_ops->get_segment(vcpu, var, seg); in kvm_get_segment()
4124 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
4131 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
4139 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
4188 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
4214 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
4276 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
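Six call sites (lines 4124-4276) repeat one idiom: ask the vendor module for the current privilege level and, at CPL 3, tag the access with PFERR_USER_MASK so the software MMU enforces user-mode page permissions during the gva-to-gpa walk. A minimal sketch of that idiom (demo_* names are hypothetical; PFERR_USER_MASK is bit 2 of the x86 page-fault error code):

    /* Slice of the table used here; one member of the real kvm_x86_ops. */
    struct demo_cpl_ops { int (*get_cpl)(void *vcpu); };
    static struct demo_cpl_ops *demo_cpl_ops;

    #define DEMO_PFERR_USER_MASK (1u << 2)   /* the U/S error-code bit */

    static unsigned int demo_access_flags(void *vcpu)
    {
        /* cf. lines 4124-4276: CPL 3 reads, writes, and fetches carry
         * the user bit so user-mode permission checks apply. */
        return demo_cpl_ops->get_cpl(vcpu) == 3 ? DEMO_PFERR_USER_MASK : 0;
    }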
4655 return kvm_x86_ops->get_segment_base(vcpu, seg); in get_segment_base()
4668 if (kvm_x86_ops->has_wbinvd_exit()) { in kvm_emulate_wbinvd_noskip()
4683 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_wbinvd()
4773 return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt)); in emulator_get_cpl()
4778 kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt); in emulator_get_gdt()
4783 kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt); in emulator_get_idt()
4788 kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt); in emulator_set_gdt()
4793 kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt); in emulator_set_idt()
4944 return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); in emulator_intercept()
4965 kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); in emulator_set_nmi_mask()
5011 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in toggle_interruptibility()
5022 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
5047 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
5099 if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { in handle_emulation_failure()
5452 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in x86_emulate_instruction()
5660 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); in kvm_is_user_mode()
5770 struct kvm_x86_ops *ops = opaque; in kvm_arch_init()
5772 if (kvm_x86_ops) { in kvm_arch_init()
5802 kvm_x86_ops = ops; in kvm_arch_init()
5838 kvm_x86_ops = NULL; in kvm_arch_exit()
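kvm_arch_init() (line 5770) receives the vendor table as kvm_init()'s opaque argument, rejects a second registration, and publishes it in the global; kvm_arch_exit() (line 5838) clears it again. A hedged sketch of both sides of that handshake, with the real error handling and locking trimmed away:

    #include <errno.h>

    /* Same shape as the first sketch's table. */
    struct demo_ops {
        int  (*get_cpl)(void *vcpu);
        void (*flush_log_dirty)(void *kvm);
    };
    static struct demo_ops *demo_x86_ops;

    /* Core side: mirrors the kvm_arch_init()/kvm_arch_exit() lines above. */
    static int demo_arch_init(void *opaque)
    {
        struct demo_ops *ops = opaque;   /* the vendor table, cf. line 5770 */

        if (demo_x86_ops)                /* a vendor module already loaded */
            return -EEXIST;
        demo_x86_ops = ops;              /* publish, cf. line 5802 */
        return 0;
    }

    static void demo_arch_exit(void)
    {
        demo_x86_ops = NULL;             /* unregister, cf. line 5838 */
    }

    /* Vendor side (hypothetical): vmx.c and svm.c each fill a static
     * table and pass it down; optional hooks they lack stay NULL. */
    static int demo_vendor_get_cpl(void *vcpu) { (void)vcpu; return 0; }

    static struct demo_ops demo_vendor_ops = {
        .get_cpl         = demo_vendor_get_cpl,
        .flush_log_dirty = NULL,         /* this vendor has no PML */
    };

    static int demo_vendor_init(void)
    {
        return demo_arch_init(&demo_vendor_ops);
    }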
5858 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_halt()
5886 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_hypercall()
5908 if (kvm_x86_ops->get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
5940 kvm_x86_ops->patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
5968 if (!kvm_x86_ops->update_cr8_intercept) in update_cr8_intercept()
5984 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
6007 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, in inject_pending_event()
6015 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
6020 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
6024 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
6025 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
6031 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { in inject_pending_event()
6034 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
6043 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
6044 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
6048 if (kvm_x86_ops->interrupt_allowed(vcpu)) { in inject_pending_event()
6051 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
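Lines 6007-6051 trace inject_pending_event()'s fixed priority: a pending exception is queued first, then an NMI if nmi_allowed() permits, then an external interrupt if interrupt_allowed() does, with check_nested_events() giving an L1 hypervisor a chance to react first (the sketch below omits that nested step). A simplified sketch of the ordering, with placeholder types:

    #include <stdbool.h>

    struct demo_vcpu {
        bool         exception_pending;
        unsigned int nmi_pending;
        bool         irq_pending;
    };

    /* Slice of the vendor table used by this path; in the real code
     * these are all members of the one struct kvm_x86_ops. */
    struct demo_event_ops {
        void (*queue_exception)(struct demo_vcpu *v);
        bool (*nmi_allowed)(struct demo_vcpu *v);
        void (*set_nmi)(struct demo_vcpu *v);
        bool (*interrupt_allowed)(struct demo_vcpu *v);
        void (*set_irq)(struct demo_vcpu *v);
    };
    static struct demo_event_ops *demo_event_ops;

    static void demo_inject_pending_event(struct demo_vcpu *v)
    {
        if (v->exception_pending) {
            demo_event_ops->queue_exception(v);      /* cf. line 6007 */
        } else if (v->nmi_pending && demo_event_ops->nmi_allowed(v)) {
            v->nmi_pending--;                        /* cf. lines 6031-6034 */
            demo_event_ops->set_nmi(v);
        } else if (v->irq_pending && demo_event_ops->interrupt_allowed(v)) {
            demo_event_ops->set_irq(v);              /* cf. lines 6048-6051 */
        }
    }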
6066 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
6159 kvm_x86_ops->get_gdt(vcpu, &dt); in process_smi_save_state_32()
6163 kvm_x86_ops->get_idt(vcpu, &dt); in process_smi_save_state_32()
6213 kvm_x86_ops->get_idt(vcpu, &dt); in process_smi_save_state_64()
6223 kvm_x86_ops->get_gdt(vcpu, &dt); in process_smi_save_state_64()
6256 if (kvm_x86_ops->get_nmi_mask(vcpu)) in process_smi()
6259 kvm_x86_ops->set_nmi_mask(vcpu, true); in process_smi()
6265 kvm_x86_ops->set_cr0(vcpu, cr0); in process_smi()
6268 kvm_x86_ops->set_cr4(vcpu, 0); in process_smi()
6272 kvm_x86_ops->set_idt(vcpu, &dt); in process_smi()
6302 kvm_x86_ops->set_efer(vcpu, 0); in process_smi()
6318 kvm_x86_ops->sync_pir_to_irr(vcpu); in vcpu_scan_ioapic()
6321 kvm_x86_ops->load_eoi_exitmap(vcpu); in vcpu_scan_ioapic()
6327 kvm_x86_ops->tlb_flush(vcpu); in kvm_vcpu_flush_tlb()
6337 if (!kvm_x86_ops->set_apic_access_page_addr) in kvm_vcpu_reload_apic_access_page()
6343 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); in kvm_vcpu_reload_apic_access_page()
6408 kvm_x86_ops->fpu_deactivate(vcpu); in vcpu_enter_guest()
6464 if (kvm_x86_ops->hwapic_irr_update) in vcpu_enter_guest()
6465 kvm_x86_ops->hwapic_irr_update(vcpu, in vcpu_enter_guest()
6481 kvm_x86_ops->enable_nmi_window(vcpu); in vcpu_enter_guest()
6483 kvm_x86_ops->enable_irq_window(vcpu); in vcpu_enter_guest()
6499 kvm_x86_ops->prepare_guest_switch(vcpu); in vcpu_enter_guest()
6543 kvm_x86_ops->run(vcpu); in vcpu_enter_guest()
6553 kvm_x86_ops->sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
6578 kvm_x86_ops->handle_external_intr(vcpu); in vcpu_enter_guest()
6610 r = kvm_x86_ops->handle_exit(vcpu); in vcpu_enter_guest()
6614 kvm_x86_ops->cancel_injection(vcpu); in vcpu_enter_guest()
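Lines 6464-6614 are the heart of vcpu_enter_guest(): inject pending events, open an NMI/IRQ window if something could not be injected, prepare_guest_switch(), run() the guest, then let the vendor module decode the exit; if entry is abandoned late, cancel_injection() rolls the event setup back. A hedged sketch of that ordering, with the request and signal machinery reduced to a stub:

    #include <stdbool.h>
    #include <errno.h>

    struct demo_run_ops {
        void (*prepare_guest_switch)(void *vcpu);  /* cf. line 6499 */
        void (*run)(void *vcpu);                   /* VMRESUME/VMRUN, line 6543 */
        int  (*handle_exit)(void *vcpu);           /* cf. line 6610 */
        void (*cancel_injection)(void *vcpu);      /* cf. line 6614 */
    };
    static struct demo_run_ops *demo_run_ops;

    /* Placeholder for the real bail-out checks (signals, pending requests). */
    static bool demo_should_bail(void *vcpu) { (void)vcpu; return false; }

    static int demo_enter_guest(void *vcpu)
    {
        demo_run_ops->prepare_guest_switch(vcpu);
        if (demo_should_bail(vcpu)) {
            /* Entry abandoned after events were queued: roll back. */
            demo_run_ops->cancel_injection(vcpu);
            return -EINTR;
        }
        demo_run_ops->run(vcpu);
        return demo_run_ops->handle_exit(vcpu);    /* >0 means keep looping */
    }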
6624 (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) { in vcpu_block()
6629 if (kvm_x86_ops->post_block) in vcpu_block()
6630 kvm_x86_ops->post_block(vcpu); in vcpu_block()
6934 kvm_x86_ops->get_idt(vcpu, &dt); in kvm_arch_vcpu_ioctl_get_sregs()
6937 kvm_x86_ops->get_gdt(vcpu, &dt); in kvm_arch_vcpu_ioctl_get_sregs()
7021 kvm_x86_ops->set_idt(vcpu, &dt); in kvm_arch_vcpu_ioctl_set_sregs()
7024 kvm_x86_ops->set_gdt(vcpu, &dt); in kvm_arch_vcpu_ioctl_set_sregs()
7034 kvm_x86_ops->set_efer(vcpu, sregs->efer); in kvm_arch_vcpu_ioctl_set_sregs()
7040 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); in kvm_arch_vcpu_ioctl_set_sregs()
7044 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); in kvm_arch_vcpu_ioctl_set_sregs()
7135 kvm_x86_ops->update_bp_intercept(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
7259 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_free()
7272 vcpu = kvm_x86_ops->vcpu_create(kvm, id); in kvm_arch_vcpu_create()
7321 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
7362 kvm_x86_ops->vcpu_reset(vcpu, init_event); in kvm_vcpu_reset()
7387 ret = kvm_x86_ops->hardware_enable(); in kvm_arch_hardware_enable()
7469 kvm_x86_ops->hardware_disable(); in kvm_arch_hardware_disable()
7477 r = kvm_x86_ops->hardware_setup(); in kvm_arch_hardware_setup()
7501 kvm_x86_ops->hardware_unsetup(); in kvm_arch_hardware_unsetup()
7506 kvm_x86_ops->check_processor_compatibility(rtn); in kvm_arch_check_processor_compat()
7624 kvm_x86_ops->sched_in(vcpu, cpu); in kvm_arch_sched_in()
7918 if (kvm_x86_ops->slot_enable_log_dirty) in kvm_mmu_slot_apply_flags()
7919 kvm_x86_ops->slot_enable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
7923 if (kvm_x86_ops->slot_disable_log_dirty) in kvm_mmu_slot_apply_flags()
7924 kvm_x86_ops->slot_disable_log_dirty(kvm, new); in kvm_mmu_slot_apply_flags()
8010 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) in kvm_arch_vcpu_runnable()
8011 kvm_x86_ops->check_nested_events(vcpu, false); in kvm_arch_vcpu_runnable()
8023 return kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_interrupt_allowed()
8045 rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_get_rflags()
8057 kvm_x86_ops->set_rflags(vcpu, rflags); in __kvm_set_rflags()
8164 kvm_x86_ops->get_cpl(vcpu) == 0)) in kvm_arch_async_page_not_present()
8206 kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_can_inject_async_page_present()
8251 if (kvm_x86_ops->update_pi_irte) { in kvm_arch_irq_bypass_add_producer()
8253 return kvm_x86_ops->update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
8267 if (!kvm_x86_ops->update_pi_irte) { in kvm_arch_irq_bypass_del_producer()
8281 ret = kvm_x86_ops->update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
8290 if (!kvm_x86_ops->update_pi_irte) in kvm_arch_update_irqfd_routing()
8293 return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set); in kvm_arch_update_irqfd_routing()
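The closing cluster (lines 8251-8293) applies the same optional-hook discipline to VT-d posted interrupts: update_pi_irte exists only when the vendor module supports it, so every IRQ-bypass path probes for NULL before wiring a producer to a guest interrupt. A hedged sketch of that guard (the parameter types are simplified placeholders):

    #include <stdbool.h>
    #include <errno.h>

    struct demo_pi_ops {
        /* Map a host IRQ to a guest interrupt, or tear the mapping down
         * when set is false; optional, NULL without posted interrupts. */
        int (*update_pi_irte)(void *kvm, unsigned int host_irq,
                              unsigned int guest_irq, bool set);
    };
    static struct demo_pi_ops *demo_pi_ops;

    static int demo_update_irqfd_routing(void *kvm, unsigned int host_irq,
                                         unsigned int guest_irq, bool set)
    {
        /* cf. lines 8290-8293: no hook means no PI hardware, bail out. */
        if (!demo_pi_ops->update_pi_irte)
            return -EINVAL;
        return demo_pi_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
    }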