Lines Matching refs:vcpu (arch/x86/kvm/x86.c)
92 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
93 static void process_nmi(struct kvm_vcpu *vcpu);
94 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
189 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
193 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
279 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) in kvm_get_apic_base() argument
281 return vcpu->arch.apic_base; in kvm_get_apic_base()
285 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_apic_base() argument
287 u64 old_state = vcpu->arch.apic_base & in kvm_set_apic_base()
291 u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | in kvm_set_apic_base()
292 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); in kvm_set_apic_base()
303 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
361 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, in kvm_multiple_exception() argument
368 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_multiple_exception()
370 if (!vcpu->arch.exception.pending) { in kvm_multiple_exception()
372 if (has_error && !is_protmode(vcpu)) in kvm_multiple_exception()
374 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
375 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
376 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
377 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
378 vcpu->arch.exception.reinject = reinject; in kvm_multiple_exception()
383 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
386 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_multiple_exception()
394 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
395 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
396 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
397 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
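
The excerpt above (lines 361-397) is KVM's exception-merging logic: a second fault arriving while one is pending is either queued, promoted to #DF, or escalated to a triple fault. The standalone sketch below models only that promotion rule; the class table follows the SDM's double-fault rules, and the reinject and benign-pair handling of the real function is deliberately omitted.

/*
 * Simplified model of the promotion logic in kvm_multiple_exception():
 * two contributory faults, or a #PF followed by a non-benign fault,
 * become #DF (vector 8); any fault raised while #DF is pending is a
 * triple fault.  Vector numbers follow the x86 SDM.
 */
#include <stdio.h>

enum exc_class { EXC_BENIGN, EXC_CONTRIBUTORY, EXC_PF };

static enum exc_class classify(int nr)
{
    switch (nr) {
    case 14:                                        /* #PF */
        return EXC_PF;
    case 0: case 10: case 11: case 12: case 13:     /* #DE #TS #NP #SS #GP */
        return EXC_CONTRIBUTORY;
    default:
        return EXC_BENIGN;
    }
}

/* pending_nr < 0 means nothing is pending.  Returns the vector to
 * deliver, 8 for a promoted #DF, or -1 for a triple fault. */
static int merge_exceptions(int pending_nr, int new_nr)
{
    enum exc_class prev, cur;

    if (pending_nr < 0)
        return new_nr;                  /* nothing pending: queue it */
    if (pending_nr == 8)
        return -1;                      /* fault during #DF delivery */

    prev = classify(pending_nr);
    cur  = classify(new_nr);
    if ((prev == EXC_CONTRIBUTORY && cur == EXC_CONTRIBUTORY) ||
        (prev == EXC_PF && cur != EXC_BENIGN))
        return 8;                       /* promote to #DF */
    return new_nr;                      /* otherwise deliver the new one */
}

int main(void)
{
    printf("#PF then #GP -> %d (#DF)\n", merge_exceptions(14, 13));
    printf("#GP then #PF -> %d (delivered as-is)\n", merge_exceptions(13, 14));
    printf("#DF then #GP -> %d (triple fault)\n", merge_exceptions(8, 13));
    return 0;
}
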
405 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_queue_exception() argument
407 kvm_multiple_exception(vcpu, nr, false, 0, false); in kvm_queue_exception()
411 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_requeue_exception() argument
413 kvm_multiple_exception(vcpu, nr, false, 0, true); in kvm_requeue_exception()
417 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) in kvm_complete_insn_gp() argument
420 kvm_inject_gp(vcpu, 0); in kvm_complete_insn_gp()
422 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_complete_insn_gp()
426 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_inject_page_fault() argument
428 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
429 vcpu->arch.cr2 = fault->address; in kvm_inject_page_fault()
430 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
434 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_propagate_fault() argument
436 if (mmu_is_nested(vcpu) && !fault->nested_page_fault) in kvm_propagate_fault()
437 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
439 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
444 void kvm_inject_nmi(struct kvm_vcpu *vcpu) in kvm_inject_nmi() argument
446 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
447 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_inject_nmi()
451 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_queue_exception_e() argument
453 kvm_multiple_exception(vcpu, nr, true, error_code, false); in kvm_queue_exception_e()
457 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_requeue_exception_e() argument
459 kvm_multiple_exception(vcpu, nr, true, error_code, true); in kvm_requeue_exception_e()
467 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) in kvm_require_cpl() argument
469 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
471 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in kvm_require_cpl()
476 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) in kvm_require_dr() argument
478 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE)) in kvm_require_dr()
481 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_require_dr()
491 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in kvm_read_guest_page_mmu() argument
500 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu()
506 return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len); in kvm_read_guest_page_mmu()
510 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_read_nested_guest_page() argument
513 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
520 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) in load_pdptrs() argument
528 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, in load_pdptrs()
538 vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) { in load_pdptrs()
547 (unsigned long *)&vcpu->arch.regs_avail); in load_pdptrs()
549 (unsigned long *)&vcpu->arch.regs_dirty); in load_pdptrs()
556 static bool pdptrs_changed(struct kvm_vcpu *vcpu) in pdptrs_changed() argument
558 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
564 if (is_long_mode(vcpu) || !is_pae(vcpu)) in pdptrs_changed()
568 (unsigned long *)&vcpu->arch.regs_avail)) in pdptrs_changed()
571 gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; in pdptrs_changed()
572 offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); in pdptrs_changed()
573 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), in pdptrs_changed()
577 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
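
load_pdptrs() (line 520) and pdptrs_changed() (line 556) both fetch the four PAE page-directory-pointer entries from guest memory at the 32-byte-aligned address in CR3. A minimal sketch of the gfn/offset split used on lines 571-572, assuming the usual 4 KiB page size:

/*
 * How lines 571-572 locate the PAE page-directory-pointer table: CR3
 * bits 31:5 name a 32-byte-aligned table, split here into a guest frame
 * number plus an offset into that page.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    uint64_t cr3    = 0x12345678;                  /* example guest CR3 */
    uint64_t table  = cr3 & ~31u;                  /* 32-byte-aligned PDPT */
    uint64_t gfn    = table >> PAGE_SHIFT;         /* guest frame number */
    uint64_t offset = table & (PAGE_SIZE - 1);     /* offset within that page */

    /*
     * load_pdptrs()/pdptrs_changed() then read 4 * 8 bytes from
     * (gfn, offset) and validate the present/reserved bits of each
     * entry before caching them in walk_mmu->pdptrs[].
     */
    printf("PDPT at gfn 0x%llx, offset 0x%llx\n",
           (unsigned long long)gfn, (unsigned long long)offset);
    return 0;
}
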
583 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_set_cr0() argument
585 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_set_cr0()
603 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { in kvm_set_cr0()
605 if ((vcpu->arch.efer & EFER_LME)) { in kvm_set_cr0()
608 if (!is_pae(vcpu)) in kvm_set_cr0()
610 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
615 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr0()
616 kvm_read_cr3(vcpu))) in kvm_set_cr0()
620 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) in kvm_set_cr0()
623 kvm_x86_ops->set_cr0(vcpu, cr0); in kvm_set_cr0()
626 kvm_clear_async_pf_completion_queue(vcpu); in kvm_set_cr0()
627 kvm_async_pf_hash_reset(vcpu); in kvm_set_cr0()
631 kvm_mmu_reset_context(vcpu); in kvm_set_cr0()
634 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_set_cr0()
635 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_set_cr0()
636 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_set_cr0()
642 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) in kvm_lmsw() argument
644 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); in kvm_lmsw()
648 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) in kvm_load_guest_xcr0() argument
650 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && in kvm_load_guest_xcr0()
651 !vcpu->guest_xcr0_loaded) { in kvm_load_guest_xcr0()
653 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xcr0()
654 vcpu->guest_xcr0_loaded = 1; in kvm_load_guest_xcr0()
658 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) in kvm_put_guest_xcr0() argument
660 if (vcpu->guest_xcr0_loaded) { in kvm_put_guest_xcr0()
661 if (vcpu->arch.xcr0 != host_xcr0) in kvm_put_guest_xcr0()
663 vcpu->guest_xcr0_loaded = 0; in kvm_put_guest_xcr0()
667 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in __kvm_set_xcr() argument
670 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
686 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
700 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
703 kvm_update_cpuid(vcpu); in __kvm_set_xcr()
707 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in kvm_set_xcr() argument
709 if (kvm_x86_ops->get_cpl(vcpu) != 0 || in kvm_set_xcr()
710 __kvm_set_xcr(vcpu, index, xcr)) { in kvm_set_xcr()
711 kvm_inject_gp(vcpu, 0); in kvm_set_xcr()
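
__kvm_set_xcr() (lines 667-706) rejects XCR0 values the guest must not set. The checks visible here are modeled below as a standalone validator; the bit names and the example "supported" mask are illustrative, and the real function additionally enforces AVX-512 grouping and refreshes guest CPUID when extended-state bits change.

/*
 * Standalone model of the XCR0 sanity checks in __kvm_set_xcr().  Bit
 * positions follow the x86 XSAVE layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define XFEAT_FP       (1ULL << 0)
#define XFEAT_SSE      (1ULL << 1)
#define XFEAT_YMM      (1ULL << 2)
#define XFEAT_BNDREGS  (1ULL << 3)
#define XFEAT_BNDCSR   (1ULL << 4)

static bool xcr0_is_valid(uint64_t xcr0, uint64_t supported)
{
    if (!(xcr0 & XFEAT_FP))
        return false;                       /* x87 state must stay enabled */
    if ((xcr0 & XFEAT_YMM) && !(xcr0 & XFEAT_SSE))
        return false;                       /* AVX requires SSE state */
    if (xcr0 & ~(supported | XFEAT_FP))
        return false;                       /* bits the guest may not set */
    if (!(xcr0 & XFEAT_BNDREGS) != !(xcr0 & XFEAT_BNDCSR))
        return false;                       /* MPX bits travel together */
    return true;
}

int main(void)
{
    uint64_t supported = XFEAT_FP | XFEAT_SSE | XFEAT_YMM;

    printf("xcr0=0x7 -> %d\n", xcr0_is_valid(0x7, supported)); /* FP|SSE|YMM: ok */
    printf("xcr0=0x4 -> %d\n", xcr0_is_valid(0x4, supported)); /* YMM alone: rejected */
    return 0;
}
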
718 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_set_cr4() argument
720 unsigned long old_cr4 = kvm_read_cr4(vcpu); in kvm_set_cr4()
727 if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) in kvm_set_cr4()
730 if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) in kvm_set_cr4()
733 if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) in kvm_set_cr4()
736 if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) in kvm_set_cr4()
739 if (is_long_mode(vcpu)) { in kvm_set_cr4()
742 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) in kvm_set_cr4()
744 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
745 kvm_read_cr3(vcpu))) in kvm_set_cr4()
749 if (!guest_cpuid_has_pcid(vcpu)) in kvm_set_cr4()
753 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) in kvm_set_cr4()
757 if (kvm_x86_ops->set_cr4(vcpu, cr4)) in kvm_set_cr4()
762 kvm_mmu_reset_context(vcpu); in kvm_set_cr4()
765 kvm_update_cpuid(vcpu); in kvm_set_cr4()
771 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in kvm_set_cr3() argument
777 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { in kvm_set_cr3()
778 kvm_mmu_sync_roots(vcpu); in kvm_set_cr3()
779 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_set_cr3()
783 if (is_long_mode(vcpu)) { in kvm_set_cr3()
786 } else if (is_pae(vcpu) && is_paging(vcpu) && in kvm_set_cr3()
787 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
790 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
791 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_set_cr3()
792 kvm_mmu_new_cr3(vcpu); in kvm_set_cr3()
797 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) in kvm_set_cr8() argument
801 if (lapic_in_kernel(vcpu)) in kvm_set_cr8()
802 kvm_lapic_set_tpr(vcpu, cr8); in kvm_set_cr8()
804 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
809 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) in kvm_get_cr8() argument
811 if (lapic_in_kernel(vcpu)) in kvm_get_cr8()
812 return kvm_lapic_get_cr8(vcpu); in kvm_get_cr8()
814 return vcpu->arch.cr8; in kvm_get_cr8()
818 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) in kvm_update_dr0123() argument
822 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
824 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
825 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
829 static void kvm_update_dr6(struct kvm_vcpu *vcpu) in kvm_update_dr6() argument
831 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in kvm_update_dr6()
832 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); in kvm_update_dr6()
835 static void kvm_update_dr7(struct kvm_vcpu *vcpu) in kvm_update_dr7() argument
839 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
840 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
842 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
843 kvm_x86_ops->set_dr7(vcpu, dr7); in kvm_update_dr7()
844 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
846 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
849 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) in kvm_dr6_fixed() argument
853 if (!guest_cpuid_has_rtm(vcpu)) in kvm_dr6_fixed()
858 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in __kvm_set_dr() argument
862 vcpu->arch.db[dr] = val; in __kvm_set_dr()
863 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in __kvm_set_dr()
864 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
871 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
872 kvm_update_dr6(vcpu); in __kvm_set_dr()
879 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
880 kvm_update_dr7(vcpu); in __kvm_set_dr()
887 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in kvm_set_dr() argument
889 if (__kvm_set_dr(vcpu, dr, val)) { in kvm_set_dr()
890 kvm_inject_gp(vcpu, 0); in kvm_set_dr()
897 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) in kvm_get_dr() argument
901 *val = vcpu->arch.db[dr]; in kvm_get_dr()
906 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_get_dr()
907 *val = vcpu->arch.dr6; in kvm_get_dr()
909 *val = kvm_x86_ops->get_dr6(vcpu); in kvm_get_dr()
914 *val = vcpu->arch.dr7; in kvm_get_dr()
921 bool kvm_rdpmc(struct kvm_vcpu *vcpu) in kvm_rdpmc() argument
923 u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_rdpmc()
927 err = kvm_pmu_rdpmc(vcpu, ecx, &data); in kvm_rdpmc()
930 kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); in kvm_rdpmc()
931 kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); in kvm_rdpmc()
981 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in kvm_valid_efer() argument
989 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); in kvm_valid_efer()
997 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); in kvm_valid_efer()
1006 static int set_efer(struct kvm_vcpu *vcpu, u64 efer) in set_efer() argument
1008 u64 old_efer = vcpu->arch.efer; in set_efer()
1010 if (!kvm_valid_efer(vcpu, efer)) in set_efer()
1013 if (is_paging(vcpu) in set_efer()
1014 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1018 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1020 kvm_x86_ops->set_efer(vcpu, efer); in set_efer()
1024 kvm_mmu_reset_context(vcpu); in set_efer()
1040 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_set_msr() argument
1067 return kvm_x86_ops->set_msr(vcpu, msr); in kvm_set_msr()
1074 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr() argument
1081 r = kvm_get_msr(vcpu, &msr); in do_get_msr()
1089 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_set_msr() argument
1096 return kvm_set_msr(vcpu, &msr); in do_set_msr()
1140 void kvm_set_pending_timer(struct kvm_vcpu *vcpu) in kvm_set_pending_timer() argument
1147 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); in kvm_set_pending_timer()
1243 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) in nsec_to_cycles() argument
1245 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, in nsec_to_cycles()
1246 vcpu->arch.virtual_tsc_shift); in nsec_to_cycles()
1256 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) in set_tsc_khz() argument
1262 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in set_tsc_khz()
1269 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
1270 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
1288 vcpu->arch.tsc_scaling_ratio = ratio; in set_tsc_khz()
1292 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) in kvm_set_tsc_khz() argument
1300 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in kvm_set_tsc_khz()
1306 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
1307 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
1308 vcpu->arch.virtual_tsc_khz = this_tsc_khz; in kvm_set_tsc_khz()
1322 return set_tsc_khz(vcpu, this_tsc_khz, use_scaling); in kvm_set_tsc_khz()
1325 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) in compute_guest_tsc() argument
1327 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
1328 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
1329 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
1330 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
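
nsec_to_cycles() (line 1243) and compute_guest_tsc() (line 1325) both scale a delta by the per-vCPU (virtual_tsc_mult, virtual_tsc_shift) pair through pvclock_scale_delta(). A plain-C equivalent of that fixed-point scaling is sketched below; the mult/shift pair used is illustrative, not what kvm_get_time_scale() would emit for a particular frequency.

/*
 * Shift the delta, then multiply by mul_frac / 2^32 -- the pvclock
 * fixed-point convention used by the TSC helpers above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
    if (shift < 0)
        delta >>= -shift;
    else
        delta <<= shift;
    return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
    /* (x << 2) * 2^31 / 2^32 == 2x, i.e. nanoseconds -> cycles at 2 GHz */
    uint64_t cycles = scale_delta(1000, 0x80000000u, 2);

    printf("1000 ns at 2 GHz -> %llu cycles\n", (unsigned long long)cycles);

    /*
     * compute_guest_tsc() then adds the last value the guest wrote:
     *   guest_tsc = scale_delta(now_ns - this_tsc_nsec, mult, shift)
     *               + this_tsc_write;
     */
    return 0;
}
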
1334 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) in kvm_track_tsc_matching() argument
1338 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
1342 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
1354 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_track_tsc_matching()
1356 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
1357 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
1362 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) in update_ia32_tsc_adjust_msr() argument
1364 u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); in update_ia32_tsc_adjust_msr()
1365 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; in update_ia32_tsc_adjust_msr()
1383 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc) in kvm_scale_tsc() argument
1386 u64 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_scale_tsc()
1395 static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) in kvm_compute_tsc_offset() argument
1399 tsc = kvm_scale_tsc(vcpu, rdtsc()); in kvm_compute_tsc_offset()
1404 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in kvm_read_l1_tsc() argument
1406 return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc)); in kvm_read_l1_tsc()
1410 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_write_tsc() argument
1412 struct kvm *kvm = vcpu->kvm; in kvm_write_tsc()
1421 offset = kvm_compute_tsc_offset(vcpu, data); in kvm_write_tsc()
1425 if (vcpu->arch.virtual_tsc_khz) { in kvm_write_tsc()
1431 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1446 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); in kvm_write_tsc()
1471 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_write_tsc()
1476 u64 delta = nsec_to_cycles(vcpu, elapsed); in kvm_write_tsc()
1478 offset = kvm_compute_tsc_offset(vcpu, data); in kvm_write_tsc()
1482 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_write_tsc()
1508 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1510 vcpu->arch.last_guest_tsc = data; in kvm_write_tsc()
1513 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_write_tsc()
1514 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_write_tsc()
1515 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_write_tsc()
1517 if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) in kvm_write_tsc()
1518 update_ia32_tsc_adjust_msr(vcpu, offset); in kvm_write_tsc()
1519 kvm_x86_ops->write_tsc_offset(vcpu, offset); in kvm_write_tsc()
1529 kvm_track_tsc_matching(vcpu); in kvm_write_tsc()
1535 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, in adjust_tsc_offset_guest() argument
1538 kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_guest()
1541 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) in adjust_tsc_offset_host() argument
1543 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
1545 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment); in adjust_tsc_offset_host()
1546 kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_host()
1689 struct kvm_vcpu *vcpu; in kvm_gen_update_masterclock() local
1697 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
1698 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_gen_update_masterclock()
1701 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
1702 clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); in kvm_gen_update_masterclock()
1711 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update() local
1759 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
1769 if (!vcpu->pv_time_enabled) in kvm_guest_time_update()
1772 if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { in kvm_guest_time_update()
1774 vcpu->virtual_tsc_khz : this_tsc_khz; in kvm_guest_time_update()
1776 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
1777 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
1778 vcpu->hw_tsc_khz = this_tsc_khz; in kvm_guest_time_update()
1782 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
1783 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
1784 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
1786 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1806 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_guest_time_update()
1807 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1808 &vcpu->hv_clock, in kvm_guest_time_update()
1809 sizeof(vcpu->hv_clock.version)); in kvm_guest_time_update()
1816 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_guest_time_update()
1818 vcpu->pvclock_set_guest_stopped_request = false; in kvm_guest_time_update()
1825 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
1827 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_guest_time_update()
1829 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1830 &vcpu->hv_clock, in kvm_guest_time_update()
1831 sizeof(vcpu->hv_clock)); in kvm_guest_time_update()
1835 vcpu->hv_clock.version++; in kvm_guest_time_update()
1836 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1837 &vcpu->hv_clock, in kvm_guest_time_update()
1838 sizeof(vcpu->hv_clock.version)); in kvm_guest_time_update()
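
The hv_clock writes above (lines 1806-1838) follow the pvclock seqlock convention: the version field is made odd before the payload is rewritten and even again afterwards. A hedged sketch of the matching guest-side read loop is below; the structure layout follows the published pvclock ABI, and the barrier macro is only a compiler-barrier placeholder.

/*
 * Guest-side counterpart of the version dance in kvm_guest_time_update().
 */
#include <stdint.h>

struct pvclock_vcpu_time_info {
    uint32_t version;
    uint32_t pad0;
    uint64_t tsc_timestamp;
    uint64_t system_time;
    uint32_t tsc_to_system_mul;
    int8_t   tsc_shift;
    uint8_t  flags;
    uint8_t  pad[2];
} __attribute__((__packed__));

#define barrier() __asm__ __volatile__("" ::: "memory")

static uint64_t read_system_time(volatile struct pvclock_vcpu_time_info *ti)
{
    uint32_t v1, v2;
    uint64_t t;

    do {
        v1 = ti->version;
        barrier();
        /* a full reader also scales (rdtsc() - tsc_timestamp) and adds it */
        t = ti->system_time;
        barrier();
        v2 = ti->version;
    } while (v1 != v2 || (v1 & 1));     /* odd version = update in progress */

    return t;
}

int main(void)
{
    static volatile struct pvclock_vcpu_time_info ti = {
        .version = 2, .system_time = 123456789,
    };
    return read_system_time(&ti) == 123456789 ? 0 : 1;
}
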
1865 struct kvm_vcpu *vcpu; in kvmclock_update_fn() local
1867 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
1868 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_update_fn()
1869 kvm_vcpu_kick(vcpu); in kvmclock_update_fn()
1899 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_mce() argument
1901 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
1906 vcpu->arch.mcg_status = data; in set_msr_mce()
1913 vcpu->arch.mcg_ctl = data; in set_msr_mce()
1927 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
1935 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) in xen_hvm_config() argument
1937 struct kvm *kvm = vcpu->kvm; in xen_hvm_config()
1938 int lm = is_long_mode(vcpu); in xen_hvm_config()
1957 if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) in xen_hvm_config()
1966 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf() argument
1974 vcpu->arch.apf.msr_val = data; in kvm_pv_enable_async_pf()
1977 kvm_clear_async_pf_completion_queue(vcpu); in kvm_pv_enable_async_pf()
1978 kvm_async_pf_hash_reset(vcpu); in kvm_pv_enable_async_pf()
1982 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
1986 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
1987 kvm_async_pf_wakeup_all(vcpu); in kvm_pv_enable_async_pf()
1991 static void kvmclock_reset(struct kvm_vcpu *vcpu) in kvmclock_reset() argument
1993 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
1996 static void accumulate_steal_time(struct kvm_vcpu *vcpu) in accumulate_steal_time() argument
2000 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in accumulate_steal_time()
2003 delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; in accumulate_steal_time()
2004 vcpu->arch.st.last_steal = current->sched_info.run_delay; in accumulate_steal_time()
2005 vcpu->arch.st.accum_steal = delta; in accumulate_steal_time()
2008 static void record_steal_time(struct kvm_vcpu *vcpu) in record_steal_time() argument
2010 accumulate_steal_time(vcpu); in record_steal_time()
2012 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
2015 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2016 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) in record_steal_time()
2019 vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; in record_steal_time()
2020 vcpu->arch.st.steal.version += 2; in record_steal_time()
2021 vcpu->arch.st.accum_steal = 0; in record_steal_time()
2023 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2024 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); in record_steal_time()
2027 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_msr_common() argument
2043 return set_efer(vcpu, data); in kvm_set_msr_common()
2050 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", in kvm_set_msr_common()
2057 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " in kvm_set_msr_common()
2071 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", in kvm_set_msr_common()
2075 return kvm_mtrr_set_msr(vcpu, msr, data); in kvm_set_msr_common()
2077 return kvm_set_apic_base(vcpu, msr_info); in kvm_set_msr_common()
2079 return kvm_x2apic_msr_write(vcpu, msr, data); in kvm_set_msr_common()
2081 kvm_set_lapic_tscdeadline_msr(vcpu, data); in kvm_set_msr_common()
2084 if (guest_cpuid_has_tsc_adjust(vcpu)) { in kvm_set_msr_common()
2086 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
2087 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
2089 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
2093 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
2098 vcpu->arch.smbase = data; in kvm_set_msr_common()
2102 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
2103 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
2108 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_set_msr_common()
2110 kvmclock_reset(vcpu); in kvm_set_msr_common()
2112 if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) { in kvm_set_msr_common()
2117 &vcpu->requests); in kvm_set_msr_common()
2122 vcpu->arch.time = data; in kvm_set_msr_common()
2123 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_set_msr_common()
2131 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_set_msr_common()
2132 &vcpu->arch.pv_time, data & ~1ULL, in kvm_set_msr_common()
2134 vcpu->arch.pv_time_enabled = false; in kvm_set_msr_common()
2136 vcpu->arch.pv_time_enabled = true; in kvm_set_msr_common()
2141 if (kvm_pv_enable_async_pf(vcpu, data)) in kvm_set_msr_common()
2152 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, in kvm_set_msr_common()
2157 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
2162 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_set_msr_common()
2166 if (kvm_lapic_enable_pv_eoi(vcpu, data)) in kvm_set_msr_common()
2173 return set_msr_mce(vcpu, msr, data); in kvm_set_msr_common()
2180 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
2181 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
2184 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " in kvm_set_msr_common()
2200 return kvm_hv_set_msr_common(vcpu, msr, data, in kvm_set_msr_common()
2206 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); in kvm_set_msr_common()
2209 if (!guest_cpuid_has_osvw(vcpu)) in kvm_set_msr_common()
2211 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
2214 if (!guest_cpuid_has_osvw(vcpu)) in kvm_set_msr_common()
2216 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
2219 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
2220 return xen_hvm_config(vcpu, data); in kvm_set_msr_common()
2221 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
2222 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
2224 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", in kvm_set_msr_common()
2228 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", in kvm_set_msr_common()
2243 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_get_msr() argument
2245 return kvm_x86_ops->get_msr(vcpu, msr); in kvm_get_msr()
2249 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_mce() argument
2252 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
2261 data = vcpu->arch.mcg_cap; in get_msr_mce()
2266 data = vcpu->arch.mcg_ctl; in get_msr_mce()
2269 data = vcpu->arch.mcg_status; in get_msr_mce()
2275 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
2284 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_get_msr_common() argument
2309 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
2310 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2318 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2337 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
2340 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2343 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
2346 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
2349 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
2354 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
2363 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
2367 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
2371 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
2374 msr_info->data = vcpu->arch.apf.msr_val; in kvm_get_msr_common()
2377 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
2380 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
2388 return get_msr_mce(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2404 return kvm_hv_get_msr_common(vcpu, in kvm_get_msr_common()
2421 if (!guest_cpuid_has_osvw(vcpu)) in kvm_get_msr_common()
2423 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
2426 if (!guest_cpuid_has_osvw(vcpu)) in kvm_get_msr_common()
2428 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
2431 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
2432 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2434 vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index); in kvm_get_msr_common()
2437 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index); in kvm_get_msr_common()
2451 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, in __msr_io() argument
2453 int (*do_msr)(struct kvm_vcpu *vcpu, in __msr_io() argument
2458 idx = srcu_read_lock(&vcpu->kvm->srcu); in __msr_io()
2460 if (do_msr(vcpu, entries[i].index, &entries[i].data)) in __msr_io()
2462 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __msr_io()
2472 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, in msr_io() argument
2473 int (*do_msr)(struct kvm_vcpu *vcpu, in msr_io() argument
2497 r = n = __msr_io(vcpu, &msrs, entries, do_msr); in msr_io()
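
__msr_io()/msr_io() (lines 2451-2497) are the kernel half of the KVM_GET_MSRS and KVM_SET_MSRS vCPU ioctls. A userspace sketch of the calling convention follows; the throwaway VM/vCPU exists only to obtain a descriptor, and the MSR index (IA32_APIC_BASE) is just an example.

/*
 * KVM_GET_MSRS takes a kvm_msrs header followed by kvm_msr_entry slots
 * and returns how many entries were processed.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_one_msr(int vcpu_fd, uint32_t index, uint64_t *value)
{
    struct {
        struct kvm_msrs hdr;
        struct kvm_msr_entry entry;
    } req;

    memset(&req, 0, sizeof(req));
    req.hdr.nmsrs   = 1;
    req.entry.index = index;

    if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) != 1)
        return -1;
    *value = req.entry.data;
    return 0;
}

int main(void)
{
    int kvm  = open("/dev/kvm", O_RDWR);
    int vm   = kvm < 0 ? -1 : ioctl(kvm, KVM_CREATE_VM, 0);
    int vcpu = vm  < 0 ? -1 : ioctl(vm, KVM_CREATE_VCPU, 0);
    uint64_t apic_base;

    if (vcpu < 0)
        return 1;
    if (read_one_msr(vcpu, 0x1b /* IA32_APIC_BASE */, &apic_base) == 0)
        printf("IA32_APIC_BASE = 0x%llx\n", (unsigned long long)apic_base);
    return 0;
}
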
2690 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) in need_emulate_wbinvd() argument
2692 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
2695 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
2698 if (need_emulate_wbinvd(vcpu)) { in kvm_arch_vcpu_load()
2700 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
2701 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
2702 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
2706 kvm_x86_ops->vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
2709 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
2710 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
2711 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
2712 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
2715 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { in kvm_arch_vcpu_load()
2716 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
2717 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
2721 u64 offset = kvm_compute_tsc_offset(vcpu, in kvm_arch_vcpu_load()
2722 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
2723 kvm_x86_ops->write_tsc_offset(vcpu, offset); in kvm_arch_vcpu_load()
2724 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
2730 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
2731 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
2732 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
2733 kvm_migrate_timers(vcpu); in kvm_arch_vcpu_load()
2734 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
2737 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_arch_vcpu_load()
2738 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_arch_vcpu_load()
2741 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
2743 kvm_x86_ops->vcpu_put(vcpu); in kvm_arch_vcpu_put()
2744 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_put()
2745 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
2748 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_lapic() argument
2751 kvm_x86_ops->sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
2752 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); in kvm_vcpu_ioctl_get_lapic()
2757 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_lapic() argument
2760 kvm_apic_post_state_restore(vcpu, s); in kvm_vcpu_ioctl_set_lapic()
2761 update_cr8_intercept(vcpu); in kvm_vcpu_ioctl_set_lapic()
2766 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) in kvm_cpu_accept_dm_intr() argument
2768 return (!lapic_in_kernel(vcpu) || in kvm_cpu_accept_dm_intr()
2769 kvm_apic_accept_pic_intr(vcpu)); in kvm_cpu_accept_dm_intr()
2778 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) in kvm_vcpu_ready_for_interrupt_injection() argument
2780 return kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
2781 !kvm_cpu_has_interrupt(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
2782 !kvm_event_needs_reinjection(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
2783 kvm_cpu_accept_dm_intr(vcpu); in kvm_vcpu_ready_for_interrupt_injection()
2786 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
2792 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
2793 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
2794 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
2802 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
2805 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
2808 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
2809 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
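
kvm_vcpu_ioctl_interrupt() (line 2786) is the kernel half of the KVM_INTERRUPT vCPU ioctl, used when the interrupt controller is emulated in userspace. A minimal caller, assuming vcpu_fd is an existing KVM_CREATE_VCPU descriptor and an arbitrary vector:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_external_interrupt(int vcpu_fd, unsigned int vector)
{
    struct kvm_interrupt irq = { .irq = vector };

    /* the kernel rejects the call while a previously injected vector is
     * still pending (the pending_external_vector check at line 2805) */
    return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}
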
2813 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_nmi() argument
2815 kvm_inject_nmi(vcpu); in kvm_vcpu_ioctl_nmi()
2820 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_smi() argument
2822 kvm_make_request(KVM_REQ_SMI, vcpu); in kvm_vcpu_ioctl_smi()
2827 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, in vcpu_ioctl_tpr_access_reporting() argument
2832 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
2836 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_setup_mce() argument
2848 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
2851 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
2854 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
2859 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_mce() argument
2862 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
2864 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
2873 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
2883 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
2884 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { in kvm_vcpu_ioctl_x86_set_mce()
2885 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_mce()
2892 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
2894 kvm_queue_exception(vcpu, MC_VECTOR); in kvm_vcpu_ioctl_x86_set_mce()
2907 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_vcpu_events() argument
2910 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2912 vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
2913 !kvm_exception_is_soft(vcpu->arch.exception.nr); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2914 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2915 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2917 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2920 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2921 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2923 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2925 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2926 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2927 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2932 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2933 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
2935 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2936 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
2944 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events() argument
2953 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
2954 vcpu->arch.exception.pending = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2955 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2956 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2957 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2959 vcpu->arch.interrupt.pending = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2960 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2961 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2963 kvm_x86_ops->set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
2966 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2968 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2969 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
2972 kvm_vcpu_has_lapic(vcpu)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
2973 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2977 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2979 vcpu->arch.hflags &= ~HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2980 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2982 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2984 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
2985 if (kvm_vcpu_has_lapic(vcpu)) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
2987 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
2989 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
2993 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
2998 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_debugregs() argument
3003 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
3004 kvm_get_dr(vcpu, 6, &val); in kvm_vcpu_ioctl_x86_get_debugregs()
3006 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
3011 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_debugregs() argument
3022 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
3023 kvm_update_dr0123(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
3024 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
3025 kvm_update_dr6(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
3026 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
3027 kvm_update_dr7(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
3034 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) in fill_xsave() argument
3036 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; in fill_xsave()
3070 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) in load_xsave() argument
3072 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; in load_xsave()
3108 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave() argument
3113 fill_xsave((u8 *) guest_xsave->region, vcpu); in kvm_vcpu_ioctl_x86_get_xsave()
3116 &vcpu->arch.guest_fpu.state.fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
3123 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xsave() argument
3137 load_xsave(vcpu, (u8 *)guest_xsave->region); in kvm_vcpu_ioctl_x86_set_xsave()
3141 memcpy(&vcpu->arch.guest_fpu.state.fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
3147 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xcrs() argument
3158 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
3161 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xcrs() argument
3175 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, in kvm_vcpu_ioctl_x86_set_xcrs()
3190 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) in kvm_set_guest_paused() argument
3192 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
3194 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
3195 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_guest_paused()
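
kvm_set_guest_paused() (line 3190) backs the KVM_KVMCLOCK_CTRL vCPU ioctl, which a VMM is expected to issue after resuming a guest it had stopped so kvmclock can flag the pause to the guest's watchdogs. A one-line caller, assuming a valid vcpu_fd:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int mark_guest_paused(int vcpu_fd)
{
    /* fails if the guest never enabled the kvmclock MSR, matching the
     * pv_time_enabled check at line 3192 */
    return ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0);
}
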
3202 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
3216 if (!vcpu->arch.apic) in kvm_arch_vcpu_ioctl()
3223 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
3234 if (!vcpu->arch.apic) in kvm_arch_vcpu_ioctl()
3240 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
3249 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_ioctl()
3253 r = kvm_vcpu_ioctl_nmi(vcpu); in kvm_arch_vcpu_ioctl()
3257 r = kvm_vcpu_ioctl_smi(vcpu); in kvm_arch_vcpu_ioctl()
3267 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
3277 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
3288 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
3299 r = msr_io(vcpu, argp, do_get_msr, 1); in kvm_arch_vcpu_ioctl()
3302 r = msr_io(vcpu, argp, do_set_msr, 0); in kvm_arch_vcpu_ioctl()
3310 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); in kvm_arch_vcpu_ioctl()
3323 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
3328 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); in kvm_arch_vcpu_ioctl()
3337 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); in kvm_arch_vcpu_ioctl()
3346 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); in kvm_arch_vcpu_ioctl()
3352 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
3367 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
3373 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
3390 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
3399 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
3412 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
3421 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
3435 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
3450 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) in kvm_arch_vcpu_ioctl()
3456 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
3460 r = kvm_set_guest_paused(vcpu); in kvm_arch_vcpu_ioctl()
3471 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
4052 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, in vcpu_mmio_write() argument
4060 if (!(vcpu->arch.apic && in vcpu_mmio_write()
4061 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
4062 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_write()
4073 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) in vcpu_mmio_read() argument
4080 if (!(vcpu->arch.apic && in vcpu_mmio_read()
4081 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
4083 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_read()
4095 static void kvm_set_segment(struct kvm_vcpu *vcpu, in kvm_set_segment() argument
4098 kvm_x86_ops->set_segment(vcpu, var, seg); in kvm_set_segment()
4101 void kvm_get_segment(struct kvm_vcpu *vcpu, in kvm_get_segment() argument
4104 kvm_x86_ops->get_segment(vcpu, var, seg); in kvm_get_segment()
4107 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, in translate_nested_gpa() argument
4112 BUG_ON(!mmu_is_nested(vcpu)); in translate_nested_gpa()
4116 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
4121 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_read() argument
4124 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
4125 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
4128 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_fetch() argument
4131 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
4133 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
4136 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_write() argument
4139 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
4141 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
4145 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_system() argument
4148 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
4152 struct kvm_vcpu *vcpu, u32 access, in kvm_read_guest_virt_helper() argument
4159 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
4167 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, in kvm_read_guest_virt_helper()
4187 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt() local
4188 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
4193 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
4201 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, in kvm_fetch_guest_virt()
4213 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_virt() local
4214 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
4216 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, in kvm_read_guest_virt()
4225 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_virt_system() local
4226 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); in kvm_read_guest_virt_system()
4232 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_phys_system() local
4233 int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); in kvm_read_guest_phys_system()
4243 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_write_guest_virt_system() local
4248 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_system()
4257 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); in kvm_write_guest_virt_system()
4272 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_mmio_gva_to_gpa() argument
4276 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
4279 if (vcpu_match_mmio_gva(vcpu, gva) in vcpu_mmio_gva_to_gpa()
4280 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
4281 vcpu->arch.access, access)) { in vcpu_mmio_gva_to_gpa()
4282 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
4288 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
4297 if (vcpu_match_mmio_gpa(vcpu, *gpa)) { in vcpu_mmio_gva_to_gpa()
4305 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, in emulator_write_phys() argument
4310 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); in emulator_write_phys()
4313 kvm_mmu_pte_write(vcpu, gpa, val, bytes); in emulator_write_phys()
4318 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
4320 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
4322 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4324 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4329 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) in read_prepare() argument
4331 if (vcpu->mmio_read_completed) { in read_prepare()
4333 vcpu->mmio_fragments[0].gpa, *(u64 *)val); in read_prepare()
4334 vcpu->mmio_read_completed = 0; in read_prepare()
4341 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in read_emulate() argument
4344 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); in read_emulate()
4347 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in write_emulate() argument
4350 return emulator_write_phys(vcpu, gpa, val, bytes); in write_emulate()
4353 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) in write_mmio() argument
4356 return vcpu_mmio_write(vcpu, gpa, bytes, val); in write_mmio()
4359 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in read_exit_mmio() argument
4366 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in write_exit_mmio() argument
4369 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
4371 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
4392 struct kvm_vcpu *vcpu, in emulator_read_write_onepage() argument
4400 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); in emulator_read_write_onepage()
4409 if (ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
4416 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
4424 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
4425 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
4438 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_write() local
4443 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
4446 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
4454 vcpu, ops); in emulator_read_write()
4466 vcpu, ops); in emulator_read_write()
4470 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
4473 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
4475 vcpu->mmio_needed = 1; in emulator_read_write()
4476 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
4478 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
4479 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
4480 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
4481 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
4483 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
4523 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_cmpxchg_emulated() local
4533 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); in emulator_cmpxchg_emulated()
4542 page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT); in emulator_cmpxchg_emulated()
4570 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); in emulator_cmpxchg_emulated()
4571 kvm_mmu_pte_write(vcpu, gpa, new, bytes); in emulator_cmpxchg_emulated()
4581 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) in kernel_pio() argument
4586 if (vcpu->arch.pio.in) in kernel_pio()
4587 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
4588 vcpu->arch.pio.size, pd); in kernel_pio()
4590 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, in kernel_pio()
4591 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
4596 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_in_out() argument
4600 vcpu->arch.pio.port = port; in emulator_pio_in_out()
4601 vcpu->arch.pio.in = in; in emulator_pio_in_out()
4602 vcpu->arch.pio.count = count; in emulator_pio_in_out()
4603 vcpu->arch.pio.size = size; in emulator_pio_in_out()
4605 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
4606 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
4610 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
4611 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
4612 vcpu->run->io.size = size; in emulator_pio_in_out()
4613 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
4614 vcpu->run->io.count = count; in emulator_pio_in_out()
4615 vcpu->run->io.port = port; in emulator_pio_in_out()
4624 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_pio_in_emulated() local
4627 if (vcpu->arch.pio.count) in emulator_pio_in_emulated()
4630 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); in emulator_pio_in_emulated()
4633 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in_emulated()
4634 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in_emulated()
4635 vcpu->arch.pio.count = 0; in emulator_pio_in_emulated()
4646 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_pio_out_emulated() local
4648 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out_emulated()
4649 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out_emulated()
4650 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); in emulator_pio_out_emulated()
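
emulator_read_write() (lines 4475-4481) and emulator_pio_in_out() (lines 4610-4615) fill in run->mmio and run->io before KVM_RUN returns to userspace, which must complete the access and re-enter the guest. A sketch of that dispatch loop follows; run_size would come from KVM_GET_VCPU_MMAP_SIZE on the /dev/kvm descriptor, and the device_* functions are empty stand-ins for a real device model.

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void device_mmio(uint64_t addr, void *data, uint32_t len, int is_write)
{
    (void)addr; (void)data; (void)len; (void)is_write;
}

static void device_pio(uint16_t port, void *data, int size, int is_in)
{
    (void)port; (void)data; (void)size; (void)is_in;
}

static int vcpu_loop(int vcpu_fd, size_t run_size)
{
    struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu_fd, 0);
    if (run == MAP_FAILED)
        return -1;

    for (;;) {
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
            return -1;

        switch (run->exit_reason) {
        case KVM_EXIT_MMIO:
            device_mmio(run->mmio.phys_addr, run->mmio.data,
                        run->mmio.len, run->mmio.is_write);
            break;
        case KVM_EXIT_IO: {
            /* PIO data lives in the same mapping, at io.data_offset */
            uint8_t *data = (uint8_t *)run + run->io.data_offset;
            uint32_t i;

            for (i = 0; i < run->io.count; i++)
                device_pio(run->io.port, data + i * run->io.size,
                           run->io.size,
                           run->io.direction == KVM_EXIT_IO_IN);
            break;
        }
        default:
            return 0;               /* let the caller handle other exits */
        }
    }
}
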
4653 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) in get_segment_base() argument
4655 return kvm_x86_ops->get_segment_base(vcpu, seg); in get_segment_base()
4663 int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd_noskip() argument
4665 if (!need_emulate_wbinvd(vcpu)) in kvm_emulate_wbinvd_noskip()
4671 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
4672 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
4675 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
4681 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd() argument
4683 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_wbinvd()
4684 return kvm_emulate_wbinvd_noskip(vcpu); in kvm_emulate_wbinvd()
4715 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_cr() local
4720 value = kvm_read_cr0(vcpu); in emulator_get_cr()
4723 value = vcpu->arch.cr2; in emulator_get_cr()
4726 value = kvm_read_cr3(vcpu); in emulator_get_cr()
4729 value = kvm_read_cr4(vcpu); in emulator_get_cr()
4732 value = kvm_get_cr8(vcpu); in emulator_get_cr()
4744 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_cr() local
4749 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); in emulator_set_cr()
4752 vcpu->arch.cr2 = val; in emulator_set_cr()
4755 res = kvm_set_cr3(vcpu, val); in emulator_set_cr()
4758 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); in emulator_set_cr()
4761 res = kvm_set_cr8(vcpu, val); in emulator_set_cr()
4840 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_segment() local
4862 kvm_set_segment(vcpu, &var, seg); in emulator_set_segment()
4895 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_smbase() local
4897 return vcpu->arch.smbase; in emulator_get_smbase()
4902 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_smbase() local
4904 vcpu->arch.smbase = smbase; in emulator_set_smbase()
5009 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) in toggle_interruptibility() argument
5011 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in toggle_interruptibility()
5022 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
5024 kvm_make_request(KVM_REQ_EVENT, vcpu); in toggle_interruptibility()
5028 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) in inject_emulated_exception() argument
5030 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in inject_emulated_exception()
5032 return kvm_propagate_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
5035 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
5038 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
5042 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) in init_emulate_ctxt() argument
5044 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
5047 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
5049 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
5050 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
5051 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
5053 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : in init_emulate_ctxt()
5059 ctxt->emul_flags = vcpu->arch.hflags; in init_emulate_ctxt()
5062 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
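
The ctxt->mode assignment in init_emulate_ctxt() picks the emulator's CPU mode from CR0.PE, EFLAGS.VM, EFER.LMA and the CS descriptor bits. A rough, standalone restatement of that decision is below; the enum names only mirror the kernel's X86EMUL_MODE_* constants and are illustrative.

/* Illustrative restatement of the emulation-mode selection. */
enum emul_mode { MODE_REAL, MODE_VM86, MODE_PROT16, MODE_PROT32, MODE_PROT64 };

static enum emul_mode pick_emul_mode(int cr0_pe, int eflags_vm,
				     int efer_lma, int cs_l, int cs_db)
{
	if (!cr0_pe)
		return MODE_REAL;	/* CR0.PE clear: real mode           */
	if (eflags_vm)
		return MODE_VM86;	/* EFLAGS.VM set: virtual-8086       */
	if (efer_lma && cs_l)
		return MODE_PROT64;	/* long mode + 64-bit code segment   */
	return cs_db ? MODE_PROT32 : MODE_PROT16; /* default operand size    */
}
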
5065 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) in kvm_inject_realmode_interrupt() argument
5067 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
5070 init_emulate_ctxt(vcpu); in kvm_inject_realmode_interrupt()
5081 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
5082 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
5085 vcpu->arch.nmi_pending = 0; in kvm_inject_realmode_interrupt()
5087 vcpu->arch.interrupt.pending = false; in kvm_inject_realmode_interrupt()
5093 static int handle_emulation_failure(struct kvm_vcpu *vcpu) in handle_emulation_failure() argument
5097 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
5098 trace_kvm_emulate_insn_failed(vcpu); in handle_emulation_failure()
5099 if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { in handle_emulation_failure()
5100 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
5101 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
5102 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
5105 kvm_queue_exception(vcpu, UD_VECTOR); in handle_emulation_failure()
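
When handle_emulation_failure() hits a CPL0 failure it reports KVM_EXIT_INTERNAL_ERROR with suberror KVM_INTERNAL_ERROR_EMULATION. A minimal sketch of how a VMM might surface that exit:

#include <linux/kvm.h>
#include <stdio.h>

static void handle_internal_error(struct kvm_run *run)
{
	fprintf(stderr, "KVM internal error, suberror %u\n",
		run->internal.suberror);
	if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION)
		fprintf(stderr, "instruction emulation failed\n");
	for (unsigned i = 0; i < run->internal.ndata; i++)
		fprintf(stderr, "  data[%u] = 0x%llx\n", i,
			(unsigned long long)run->internal.data[i]);
}
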
5110 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, in reexecute_instruction() argument
5120 if (!vcpu->arch.mmu.direct_map) { in reexecute_instruction()
5125 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); in reexecute_instruction()
5141 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5153 if (vcpu->arch.mmu.direct_map) { in reexecute_instruction()
5156 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
5157 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
5158 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
5161 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5171 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5184 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in retry_instruction() local
5187 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
5188 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
5203 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
5214 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
5215 vcpu->arch.last_retry_addr = cr2; in retry_instruction()
5217 if (!vcpu->arch.mmu.direct_map) in retry_instruction()
5218 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); in retry_instruction()
5220 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
5225 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
5226 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
5228 static void kvm_smm_changed(struct kvm_vcpu *vcpu) in kvm_smm_changed() argument
5230 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { in kvm_smm_changed()
5232 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); in kvm_smm_changed()
5234 if (unlikely(vcpu->arch.smi_pending)) { in kvm_smm_changed()
5235 kvm_make_request(KVM_REQ_SMI, vcpu); in kvm_smm_changed()
5236 vcpu->arch.smi_pending = 0; in kvm_smm_changed()
5239 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_smm_changed()
5243 kvm_mmu_reset_context(vcpu); in kvm_smm_changed()
5246 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags) in kvm_set_hflags() argument
5248 unsigned changed = vcpu->arch.hflags ^ emul_flags; in kvm_set_hflags()
5250 vcpu->arch.hflags = emul_flags; in kvm_set_hflags()
5253 kvm_smm_changed(vcpu); in kvm_set_hflags()
5271 static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) in kvm_vcpu_check_singlestep() argument
5273 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_singlestep()
5284 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_check_singlestep()
5287 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; in kvm_vcpu_check_singlestep()
5292 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; in kvm_vcpu_check_singlestep()
5298 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_singlestep()
5299 vcpu->arch.dr6 |= DR6_BS | DR6_RTM; in kvm_vcpu_check_singlestep()
5300 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_vcpu_check_singlestep()
5305 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) in kvm_vcpu_check_breakpoint() argument
5307 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_breakpoint()
5308 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_breakpoint()
5309 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_breakpoint()
5310 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_breakpoint()
5312 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_breakpoint()
5313 vcpu->arch.eff_db); in kvm_vcpu_check_breakpoint()
5325 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_breakpoint()
5326 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { in kvm_vcpu_check_breakpoint()
5327 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_breakpoint()
5329 vcpu->arch.dr7, in kvm_vcpu_check_breakpoint()
5330 vcpu->arch.db); in kvm_vcpu_check_breakpoint()
5333 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_breakpoint()
5334 vcpu->arch.dr6 |= dr6 | DR6_RTM; in kvm_vcpu_check_breakpoint()
5335 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_vcpu_check_breakpoint()
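
The two calls above feed guest_debug_dr7/eff_db (respectively dr7/db) into a breakpoint-matching helper that is not shown in this listing. A simplified standalone sketch of the idea: every enabled debug-register slot whose address equals the current linear RIP contributes its DR6.Bn bit. This ignores the R/W-type and LEN fields that the real helper also checks.

static unsigned int match_code_breakpoints(unsigned long rip,
					   unsigned int dr7,
					   const unsigned long db[4])
{
	unsigned int dr6 = 0;

	for (int i = 0; i < 4; i++) {
		unsigned int enable = (dr7 >> (i * 2)) & 3;	/* Ln/Gn bits */
		if (enable && db[i] == rip)
			dr6 |= 1u << i;				/* set DR6.Bn */
	}
	return dr6;
}
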
5344 int x86_emulate_instruction(struct kvm_vcpu *vcpu, in x86_emulate_instruction() argument
5351 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
5353 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
5359 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
5360 kvm_clear_exception_queue(vcpu); in x86_emulate_instruction()
5363 init_emulate_ctxt(vcpu); in x86_emulate_instruction()
5371 if (kvm_vcpu_check_breakpoint(vcpu, &r)) in x86_emulate_instruction()
5383 trace_kvm_emulate_insn_start(vcpu); in x86_emulate_instruction()
5384 ++vcpu->stat.insn_emulation; in x86_emulate_instruction()
5388 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, in x86_emulate_instruction()
5393 return handle_emulation_failure(vcpu); in x86_emulate_instruction()
5398 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
5400 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
5409 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
5410 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
5421 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, in x86_emulate_instruction()
5425 return handle_emulation_failure(vcpu); in x86_emulate_instruction()
5430 if (inject_emulated_exception(vcpu)) in x86_emulate_instruction()
5432 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
5433 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
5435 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
5438 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
5441 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
5442 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
5445 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
5452 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in x86_emulate_instruction()
5453 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
5454 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
5455 if (vcpu->arch.hflags != ctxt->emul_flags) in x86_emulate_instruction()
5456 kvm_set_hflags(vcpu, ctxt->emul_flags); in x86_emulate_instruction()
5457 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
5459 kvm_vcpu_check_singlestep(vcpu, rflags, &r); in x86_emulate_instruction()
5462 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
5471 kvm_make_request(KVM_REQ_EVENT, vcpu); in x86_emulate_instruction()
5473 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
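
A simplified sketch of how a vendor-module exit handler consumes the result of x86_emulate_instruction() (through the emulate_instruction() wrapper), modeled on the #UD handlers of this era: EMULATE_DONE keeps the vCPU in the kernel, EMULATE_USER_EXIT bounces to userspace with run->exit_reason already set, and anything else falls back to injecting #UD. Not a copy of any one handler.

static int handle_ud_like_exit(struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
	if (er == EMULATE_USER_EXIT)
		return 0;		/* exit to userspace               */
	if (er != EMULATE_DONE)
		kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;			/* resume guest execution          */
}
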
5479 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) in kvm_fast_pio_out() argument
5481 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_fast_pio_out()
5482 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, in kvm_fast_pio_out()
5485 vcpu->arch.pio.count = 0; in kvm_fast_pio_out()
5514 struct kvm_vcpu *vcpu; in kvmclock_cpufreq_notifier() local
5565 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_cpufreq_notifier()
5566 if (vcpu->cpu != freq->cpu) in kvmclock_cpufreq_notifier()
5568 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_cpufreq_notifier()
5569 if (vcpu->cpu != smp_processor_id()) in kvmclock_cpufreq_notifier()
5681 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) in kvm_before_handle_nmi() argument
5683 __this_cpu_write(current_vcpu, vcpu); in kvm_before_handle_nmi()
5687 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) in kvm_after_handle_nmi() argument
5728 struct kvm_vcpu *vcpu; in pvclock_gtod_update_fn() local
5733 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
5734 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in pvclock_gtod_update_fn()
5843 int kvm_vcpu_halt(struct kvm_vcpu *vcpu) in kvm_vcpu_halt() argument
5845 ++vcpu->stat.halt_exits; in kvm_vcpu_halt()
5846 if (lapic_in_kernel(vcpu)) { in kvm_vcpu_halt()
5847 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
5850 vcpu->run->exit_reason = KVM_EXIT_HLT; in kvm_vcpu_halt()
5856 int kvm_emulate_halt(struct kvm_vcpu *vcpu) in kvm_emulate_halt() argument
5858 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_halt()
5859 return kvm_vcpu_halt(vcpu); in kvm_emulate_halt()
5881 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) in kvm_emulate_hypercall() argument
5886 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_hypercall()
5888 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
5889 return kvm_hv_hypercall(vcpu); in kvm_emulate_hypercall()
5891 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_emulate_hypercall()
5892 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); in kvm_emulate_hypercall()
5893 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_emulate_hypercall()
5894 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); in kvm_emulate_hypercall()
5895 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); in kvm_emulate_hypercall()
5899 op_64_bit = is_64_bit_mode(vcpu); in kvm_emulate_hypercall()
5908 if (kvm_x86_ops->get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
5918 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
5928 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); in kvm_emulate_hypercall()
5929 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
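
Guest-side sketch of the ABI that kvm_emulate_hypercall() services: hypercall number in RAX, arguments in RBX/RCX/RDX/RSI, result returned in RAX. Intel CPUs use VMCALL and AMD uses VMMCALL; a real guest kernel patches between the two at runtime, which this sketch does not do.

static inline long kvm_hypercall2_sketch(unsigned long nr,
					 unsigned long a0, unsigned long a1)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(a0), "c"(a1)
		     : "memory");
	return ret;
}
/* e.g. KVM_HC_KICK_CPU: a1 carries the APIC ID of the vCPU to wake,
 * handled above by kvm_pv_kick_cpu_op(). */
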
5936 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_fix_hypercall() local
5938 unsigned long rip = kvm_rip_read(vcpu); in emulator_fix_hypercall()
5940 kvm_x86_ops->patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
5945 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) in dm_request_for_irq_injection() argument
5947 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
5948 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
5951 static void post_kvm_run_save(struct kvm_vcpu *vcpu) in post_kvm_run_save() argument
5953 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
5955 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; in post_kvm_run_save()
5956 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; in post_kvm_run_save()
5957 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
5958 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
5960 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
5961 kvm_vcpu_ready_for_interrupt_injection(vcpu); in post_kvm_run_save()
5964 static void update_cr8_intercept(struct kvm_vcpu *vcpu) in update_cr8_intercept() argument
5971 if (!vcpu->arch.apic) in update_cr8_intercept()
5974 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
5975 max_irr = kvm_lapic_find_highest_irr(vcpu); in update_cr8_intercept()
5982 tpr = kvm_lapic_get_cr8(vcpu); in update_cr8_intercept()
5984 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
5987 static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) in inject_pending_event() argument
5992 if (vcpu->arch.exception.pending) { in inject_pending_event()
5993 trace_kvm_inj_exception(vcpu->arch.exception.nr, in inject_pending_event()
5994 vcpu->arch.exception.has_error_code, in inject_pending_event()
5995 vcpu->arch.exception.error_code); in inject_pending_event()
5997 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
5998 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | in inject_pending_event()
6001 if (vcpu->arch.exception.nr == DB_VECTOR && in inject_pending_event()
6002 (vcpu->arch.dr7 & DR7_GD)) { in inject_pending_event()
6003 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
6004 kvm_update_dr7(vcpu); in inject_pending_event()
6007 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, in inject_pending_event()
6008 vcpu->arch.exception.has_error_code, in inject_pending_event()
6009 vcpu->arch.exception.error_code, in inject_pending_event()
6010 vcpu->arch.exception.reinject); in inject_pending_event()
6014 if (vcpu->arch.nmi_injected) { in inject_pending_event()
6015 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
6019 if (vcpu->arch.interrupt.pending) { in inject_pending_event()
6020 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
6024 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
6025 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
6031 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { in inject_pending_event()
6032 --vcpu->arch.nmi_pending; in inject_pending_event()
6033 vcpu->arch.nmi_injected = true; in inject_pending_event()
6034 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
6035 } else if (kvm_cpu_has_injectable_intr(vcpu)) { in inject_pending_event()
6043 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
6044 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
6048 if (kvm_x86_ops->interrupt_allowed(vcpu)) { in inject_pending_event()
6049 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), in inject_pending_event()
6051 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
6057 static void process_nmi(struct kvm_vcpu *vcpu) in process_nmi() argument
6066 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
6069 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
6070 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
6071 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_nmi()
6091 static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) in process_smi_save_seg_32() argument
6096 kvm_get_segment(vcpu, &seg, n); in process_smi_save_seg_32()
6110 static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) in process_smi_save_seg_64() argument
6116 kvm_get_segment(vcpu, &seg, n); in process_smi_save_seg_64()
6127 static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf) in process_smi_save_state_32() argument
6134 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); in process_smi_save_state_32()
6135 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); in process_smi_save_state_32()
6136 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); in process_smi_save_state_32()
6137 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); in process_smi_save_state_32()
6140 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i)); in process_smi_save_state_32()
6142 kvm_get_dr(vcpu, 6, &val); in process_smi_save_state_32()
6144 kvm_get_dr(vcpu, 7, &val); in process_smi_save_state_32()
6147 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); in process_smi_save_state_32()
6153 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); in process_smi_save_state_32()
6159 kvm_x86_ops->get_gdt(vcpu, &dt); in process_smi_save_state_32()
6163 kvm_x86_ops->get_idt(vcpu, &dt); in process_smi_save_state_32()
6168 process_smi_save_seg_32(vcpu, buf, i); in process_smi_save_state_32()
6170 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); in process_smi_save_state_32()
6174 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in process_smi_save_state_32()
6177 static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf) in process_smi_save_state_64() argument
6186 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); in process_smi_save_state_64()
6188 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); in process_smi_save_state_64()
6189 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); in process_smi_save_state_64()
6191 kvm_get_dr(vcpu, 6, &val); in process_smi_save_state_64()
6193 kvm_get_dr(vcpu, 7, &val); in process_smi_save_state_64()
6196 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); in process_smi_save_state_64()
6197 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); in process_smi_save_state_64()
6198 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); in process_smi_save_state_64()
6200 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in process_smi_save_state_64()
6205 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in process_smi_save_state_64()
6207 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); in process_smi_save_state_64()
6213 kvm_x86_ops->get_idt(vcpu, &dt); in process_smi_save_state_64()
6217 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); in process_smi_save_state_64()
6223 kvm_x86_ops->get_gdt(vcpu, &dt); in process_smi_save_state_64()
6228 process_smi_save_seg_64(vcpu, buf, i); in process_smi_save_state_64()
6234 static void process_smi(struct kvm_vcpu *vcpu) in process_smi() argument
6241 if (is_smm(vcpu)) { in process_smi()
6242 vcpu->arch.smi_pending = true; in process_smi()
6246 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); in process_smi()
6247 vcpu->arch.hflags |= HF_SMM_MASK; in process_smi()
6249 if (guest_cpuid_has_longmode(vcpu)) in process_smi()
6250 process_smi_save_state_64(vcpu, buf); in process_smi()
6252 process_smi_save_state_32(vcpu, buf); in process_smi()
6254 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in process_smi()
6256 if (kvm_x86_ops->get_nmi_mask(vcpu)) in process_smi()
6257 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in process_smi()
6259 kvm_x86_ops->set_nmi_mask(vcpu, true); in process_smi()
6261 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in process_smi()
6262 kvm_rip_write(vcpu, 0x8000); in process_smi()
6264 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in process_smi()
6265 kvm_x86_ops->set_cr0(vcpu, cr0); in process_smi()
6266 vcpu->arch.cr0 = cr0; in process_smi()
6268 kvm_x86_ops->set_cr4(vcpu, 0); in process_smi()
6272 kvm_x86_ops->set_idt(vcpu, &dt); in process_smi()
6274 __kvm_set_dr(vcpu, 7, DR7_FIXED_1); in process_smi()
6276 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in process_smi()
6277 cs.base = vcpu->arch.smbase; in process_smi()
6294 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in process_smi()
6295 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); in process_smi()
6296 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); in process_smi()
6297 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); in process_smi()
6298 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); in process_smi()
6299 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); in process_smi()
6301 if (guest_cpuid_has_longmode(vcpu)) in process_smi()
6302 kvm_x86_ops->set_efer(vcpu, 0); in process_smi()
6304 kvm_update_cpuid(vcpu); in process_smi()
6305 kvm_mmu_reset_context(vcpu); in process_smi()
6308 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) in vcpu_scan_ioapic() argument
6310 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_scan_ioapic()
6313 memset(vcpu->arch.eoi_exit_bitmap, 0, 256 / 8); in vcpu_scan_ioapic()
6315 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
6316 kvm_scan_ioapic_routes(vcpu, vcpu->arch.eoi_exit_bitmap); in vcpu_scan_ioapic()
6318 kvm_x86_ops->sync_pir_to_irr(vcpu); in vcpu_scan_ioapic()
6319 kvm_ioapic_scan_entry(vcpu, vcpu->arch.eoi_exit_bitmap); in vcpu_scan_ioapic()
6321 kvm_x86_ops->load_eoi_exitmap(vcpu); in vcpu_scan_ioapic()
6324 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb() argument
6326 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb()
6327 kvm_x86_ops->tlb_flush(vcpu); in kvm_vcpu_flush_tlb()
6330 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) in kvm_vcpu_reload_apic_access_page() argument
6334 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_reload_apic_access_page()
6340 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); in kvm_vcpu_reload_apic_access_page()
6343 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); in kvm_vcpu_reload_apic_access_page()
6369 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) in vcpu_enter_guest() argument
6373 dm_request_for_irq_injection(vcpu) && in vcpu_enter_guest()
6374 kvm_cpu_accept_dm_intr(vcpu); in vcpu_enter_guest()
6378 if (vcpu->requests) { in vcpu_enter_guest()
6379 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) in vcpu_enter_guest()
6380 kvm_mmu_unload(vcpu); in vcpu_enter_guest()
6381 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) in vcpu_enter_guest()
6382 __kvm_migrate_timers(vcpu); in vcpu_enter_guest()
6383 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
6384 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
6385 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
6386 kvm_gen_kvmclock_update(vcpu); in vcpu_enter_guest()
6387 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { in vcpu_enter_guest()
6388 r = kvm_guest_time_update(vcpu); in vcpu_enter_guest()
6392 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) in vcpu_enter_guest()
6393 kvm_mmu_sync_roots(vcpu); in vcpu_enter_guest()
6394 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in vcpu_enter_guest()
6395 kvm_vcpu_flush_tlb(vcpu); in vcpu_enter_guest()
6396 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { in vcpu_enter_guest()
6397 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
6401 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
6402 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
6406 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { in vcpu_enter_guest()
6407 vcpu->fpu_active = 0; in vcpu_enter_guest()
6408 kvm_x86_ops->fpu_deactivate(vcpu); in vcpu_enter_guest()
6410 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { in vcpu_enter_guest()
6412 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
6416 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in vcpu_enter_guest()
6417 record_steal_time(vcpu); in vcpu_enter_guest()
6418 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in vcpu_enter_guest()
6419 process_smi(vcpu); in vcpu_enter_guest()
6420 if (kvm_check_request(KVM_REQ_NMI, vcpu)) in vcpu_enter_guest()
6421 process_nmi(vcpu); in vcpu_enter_guest()
6422 if (kvm_check_request(KVM_REQ_PMU, vcpu)) in vcpu_enter_guest()
6423 kvm_pmu_handle_event(vcpu); in vcpu_enter_guest()
6424 if (kvm_check_request(KVM_REQ_PMI, vcpu)) in vcpu_enter_guest()
6425 kvm_pmu_deliver_pmi(vcpu); in vcpu_enter_guest()
6426 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { in vcpu_enter_guest()
6427 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
6428 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
6429 (void *) vcpu->arch.eoi_exit_bitmap)) { in vcpu_enter_guest()
6430 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
6431 vcpu->run->eoi.vector = in vcpu_enter_guest()
6432 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
6437 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) in vcpu_enter_guest()
6438 vcpu_scan_ioapic(vcpu); in vcpu_enter_guest()
6439 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) in vcpu_enter_guest()
6440 kvm_vcpu_reload_apic_access_page(vcpu); in vcpu_enter_guest()
6441 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { in vcpu_enter_guest()
6442 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
6443 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
6447 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { in vcpu_enter_guest()
6448 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
6449 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
6459 if (kvm_lapic_enabled(vcpu)) { in vcpu_enter_guest()
6465 kvm_x86_ops->hwapic_irr_update(vcpu, in vcpu_enter_guest()
6466 kvm_lapic_find_highest_irr(vcpu)); in vcpu_enter_guest()
6469 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { in vcpu_enter_guest()
6470 kvm_apic_accept_events(vcpu); in vcpu_enter_guest()
6471 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
6476 if (inject_pending_event(vcpu, req_int_win) != 0) in vcpu_enter_guest()
6480 if (vcpu->arch.nmi_pending) in vcpu_enter_guest()
6481 kvm_x86_ops->enable_nmi_window(vcpu); in vcpu_enter_guest()
6482 if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) in vcpu_enter_guest()
6483 kvm_x86_ops->enable_irq_window(vcpu); in vcpu_enter_guest()
6486 if (kvm_lapic_enabled(vcpu)) { in vcpu_enter_guest()
6487 update_cr8_intercept(vcpu); in vcpu_enter_guest()
6488 kvm_lapic_sync_to_vapic(vcpu); in vcpu_enter_guest()
6492 r = kvm_mmu_reload(vcpu); in vcpu_enter_guest()
6499 kvm_x86_ops->prepare_guest_switch(vcpu); in vcpu_enter_guest()
6500 if (vcpu->fpu_active) in vcpu_enter_guest()
6501 kvm_load_guest_fpu(vcpu); in vcpu_enter_guest()
6502 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
6504 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
6513 if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests in vcpu_enter_guest()
6515 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
6519 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
6524 kvm_load_guest_xcr0(vcpu); in vcpu_enter_guest()
6527 smp_send_reschedule(vcpu->cpu); in vcpu_enter_guest()
6529 trace_kvm_entry(vcpu->vcpu_id); in vcpu_enter_guest()
6530 wait_lapic_expire(vcpu); in vcpu_enter_guest()
6533 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
6535 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
6536 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
6537 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
6538 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
6539 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
6540 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
6543 kvm_x86_ops->run(vcpu); in vcpu_enter_guest()
6551 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
6552 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
6553 kvm_x86_ops->sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
6554 kvm_update_dr0123(vcpu); in vcpu_enter_guest()
6555 kvm_update_dr6(vcpu); in vcpu_enter_guest()
6556 kvm_update_dr7(vcpu); in vcpu_enter_guest()
6557 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
6570 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
6572 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
6575 kvm_put_guest_xcr0(vcpu); in vcpu_enter_guest()
6578 kvm_x86_ops->handle_external_intr(vcpu); in vcpu_enter_guest()
6580 ++vcpu->stat.exits; in vcpu_enter_guest()
6594 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
6600 unsigned long rip = kvm_rip_read(vcpu); in vcpu_enter_guest()
6604 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
6605 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in vcpu_enter_guest()
6607 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
6608 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
6610 r = kvm_x86_ops->handle_exit(vcpu); in vcpu_enter_guest()
6614 kvm_x86_ops->cancel_injection(vcpu); in vcpu_enter_guest()
6615 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
6616 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
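
The core ordering trick in vcpu_enter_guest() is that the vCPU publishes IN_GUEST_MODE before re-checking vcpu->requests and pending signals, so a remote kvm_make_request() plus kick cannot be lost between the check and VM entry. A standalone C11-atomics model of just that handshake, with illustrative names rather than the kernel's:

#include <stdatomic.h>
#include <stdbool.h>

enum { TOY_OUTSIDE_GUEST_MODE, TOY_IN_GUEST_MODE };

struct toy_vcpu {
	_Atomic int mode;
	_Atomic unsigned long requests;
};

static bool try_enter_guest(struct toy_vcpu *v)
{
	atomic_store_explicit(&v->mode, TOY_IN_GUEST_MODE, memory_order_seq_cst);
	/* Re-check after publishing the mode; pairs with a requester that sets
	 * a request bit and then reads mode to decide whether to send a kick. */
	if (atomic_load_explicit(&v->requests, memory_order_seq_cst)) {
		atomic_store_explicit(&v->mode, TOY_OUTSIDE_GUEST_MODE,
				      memory_order_seq_cst);
		return false;	/* bail out and process the requests */
	}
	return true;		/* safe to enter the guest */
}
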
6621 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) in vcpu_block() argument
6623 if (!kvm_arch_vcpu_runnable(vcpu) && in vcpu_block()
6624 (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) { in vcpu_block()
6625 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
6626 kvm_vcpu_block(vcpu); in vcpu_block()
6627 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
6630 kvm_x86_ops->post_block(vcpu); in vcpu_block()
6632 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) in vcpu_block()
6636 kvm_apic_accept_events(vcpu); in vcpu_block()
6637 switch(vcpu->arch.mp_state) { in vcpu_block()
6639 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
6640 vcpu->arch.mp_state = in vcpu_block()
6643 vcpu->arch.apf.halted = false; in vcpu_block()
6654 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) in kvm_vcpu_running() argument
6656 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
6657 !vcpu->arch.apf.halted); in kvm_vcpu_running()
6660 static int vcpu_run(struct kvm_vcpu *vcpu) in vcpu_run() argument
6663 struct kvm *kvm = vcpu->kvm; in vcpu_run()
6665 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
6668 if (kvm_vcpu_running(vcpu)) { in vcpu_run()
6669 r = vcpu_enter_guest(vcpu); in vcpu_run()
6671 r = vcpu_block(kvm, vcpu); in vcpu_run()
6677 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); in vcpu_run()
6678 if (kvm_cpu_has_pending_timer(vcpu)) in vcpu_run()
6679 kvm_inject_pending_timer_irqs(vcpu); in vcpu_run()
6681 if (dm_request_for_irq_injection(vcpu) && in vcpu_run()
6682 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { in vcpu_run()
6684 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
6685 ++vcpu->stat.request_irq_exits; in vcpu_run()
6689 kvm_check_async_pf_completion(vcpu); in vcpu_run()
6693 vcpu->run->exit_reason = KVM_EXIT_INTR; in vcpu_run()
6694 ++vcpu->stat.signal_exits; in vcpu_run()
6698 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
6700 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
6704 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
6709 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) in complete_emulated_io() argument
6712 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
6713 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); in complete_emulated_io()
6714 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
6720 static int complete_emulated_pio(struct kvm_vcpu *vcpu) in complete_emulated_pio() argument
6722 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
6724 return complete_emulated_io(vcpu); in complete_emulated_pio()
6745 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) in complete_emulated_mmio() argument
6747 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
6751 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
6754 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
6756 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
6762 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
6770 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
6771 vcpu->mmio_needed = 0; in complete_emulated_mmio()
6774 if (vcpu->mmio_is_write) in complete_emulated_mmio()
6776 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
6777 return complete_emulated_io(vcpu); in complete_emulated_mmio()
6782 if (vcpu->mmio_is_write) in complete_emulated_mmio()
6785 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
6786 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
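
Sketch of the userspace half of the fragment loop driven by complete_emulated_mmio(): the VMM services one fragment per KVM_EXIT_MMIO and re-enters KVM_RUN so the kernel can feed the data back to the emulator or move to the next fragment. The mmio_read/mmio_write hooks are hypothetical device-model callbacks.

#include <linux/kvm.h>
#include <stdint.h>

extern void mmio_read(uint64_t gpa, void *data, uint32_t len);        /* assumed */
extern void mmio_write(uint64_t gpa, const void *data, uint32_t len); /* assumed */

static void handle_kvm_exit_mmio(struct kvm_run *run)
{
	if (run->mmio.is_write)
		mmio_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	else
		mmio_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
}
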
6791 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in kvm_arch_vcpu_ioctl_run() argument
6799 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
6800 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); in kvm_arch_vcpu_ioctl_run()
6802 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
6803 kvm_vcpu_block(vcpu); in kvm_arch_vcpu_ioctl_run()
6804 kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_run()
6805 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); in kvm_arch_vcpu_ioctl_run()
6811 if (!lapic_in_kernel(vcpu)) { in kvm_arch_vcpu_ioctl_run()
6812 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
6818 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
6819 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
6820 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
6821 r = cui(vcpu); in kvm_arch_vcpu_ioctl_run()
6825 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
6827 r = vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
6830 post_kvm_run_save(vcpu); in kvm_arch_vcpu_ioctl_run()
6831 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
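
A bare-bones vCPU loop against kvm_arch_vcpu_ioctl_run(): mmap the shared kvm_run area, call KVM_RUN, dispatch on exit_reason. Error handling is trimmed and the handle_* helpers are the sketches shown earlier in this listing.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdio.h>

static int run_vcpu(int kvm_fd, int vcpu_fd)
{
	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				   MAP_SHARED, vcpu_fd, 0);

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_IO:
			handle_kvm_exit_io(run);
			break;
		case KVM_EXIT_MMIO:
			handle_kvm_exit_mmio(run);
			break;
		case KVM_EXIT_HLT:
			return 0;	/* guest halted (see kvm_vcpu_halt) */
		case KVM_EXIT_INTERNAL_ERROR:
			handle_internal_error(run);
			return -1;
		default:
			fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
			return -1;
		}
	}
}
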
6837 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
6839 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in kvm_arch_vcpu_ioctl_get_regs()
6847 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_ioctl_get_regs()
6848 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in kvm_arch_vcpu_ioctl_get_regs()
6850 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_arch_vcpu_ioctl_get_regs()
6851 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); in kvm_arch_vcpu_ioctl_get_regs()
6852 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_arch_vcpu_ioctl_get_regs()
6853 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); in kvm_arch_vcpu_ioctl_get_regs()
6854 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); in kvm_arch_vcpu_ioctl_get_regs()
6855 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); in kvm_arch_vcpu_ioctl_get_regs()
6856 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); in kvm_arch_vcpu_ioctl_get_regs()
6857 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); in kvm_arch_vcpu_ioctl_get_regs()
6859 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); in kvm_arch_vcpu_ioctl_get_regs()
6860 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); in kvm_arch_vcpu_ioctl_get_regs()
6861 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); in kvm_arch_vcpu_ioctl_get_regs()
6862 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); in kvm_arch_vcpu_ioctl_get_regs()
6863 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); in kvm_arch_vcpu_ioctl_get_regs()
6864 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); in kvm_arch_vcpu_ioctl_get_regs()
6865 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); in kvm_arch_vcpu_ioctl_get_regs()
6866 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); in kvm_arch_vcpu_ioctl_get_regs()
6869 regs->rip = kvm_rip_read(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
6870 regs->rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
6875 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
6877 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in kvm_arch_vcpu_ioctl_set_regs()
6878 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in kvm_arch_vcpu_ioctl_set_regs()
6880 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); in kvm_arch_vcpu_ioctl_set_regs()
6881 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); in kvm_arch_vcpu_ioctl_set_regs()
6882 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); in kvm_arch_vcpu_ioctl_set_regs()
6883 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); in kvm_arch_vcpu_ioctl_set_regs()
6884 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); in kvm_arch_vcpu_ioctl_set_regs()
6885 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); in kvm_arch_vcpu_ioctl_set_regs()
6886 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); in kvm_arch_vcpu_ioctl_set_regs()
6887 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); in kvm_arch_vcpu_ioctl_set_regs()
6889 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); in kvm_arch_vcpu_ioctl_set_regs()
6890 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); in kvm_arch_vcpu_ioctl_set_regs()
6891 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); in kvm_arch_vcpu_ioctl_set_regs()
6892 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); in kvm_arch_vcpu_ioctl_set_regs()
6893 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); in kvm_arch_vcpu_ioctl_set_regs()
6894 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); in kvm_arch_vcpu_ioctl_set_regs()
6895 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); in kvm_arch_vcpu_ioctl_set_regs()
6896 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); in kvm_arch_vcpu_ioctl_set_regs()
6899 kvm_rip_write(vcpu, regs->rip); in kvm_arch_vcpu_ioctl_set_regs()
6900 kvm_set_rflags(vcpu, regs->rflags); in kvm_arch_vcpu_ioctl_set_regs()
6902 vcpu->arch.exception.pending = false; in kvm_arch_vcpu_ioctl_set_regs()
6904 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_regs()
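
Userspace counterpart of the two register ioctls above: read the whole register file with KVM_GET_REGS, tweak it, and write it back with KVM_SET_REGS. Illustrative only; error checks trimmed.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int bump_rip(int vcpu_fd, unsigned long delta)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;
	regs.rip += delta;
	regs.rflags |= 0x2;	/* bit 1 of RFLAGS is always set */
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}
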
6909 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) in kvm_get_cs_db_l_bits() argument
6913 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_get_cs_db_l_bits()
6919 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
6924 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in kvm_arch_vcpu_ioctl_get_sregs()
6925 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in kvm_arch_vcpu_ioctl_get_sregs()
6926 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in kvm_arch_vcpu_ioctl_get_sregs()
6927 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in kvm_arch_vcpu_ioctl_get_sregs()
6928 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in kvm_arch_vcpu_ioctl_get_sregs()
6929 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in kvm_arch_vcpu_ioctl_get_sregs()
6931 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in kvm_arch_vcpu_ioctl_get_sregs()
6932 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in kvm_arch_vcpu_ioctl_get_sregs()
6934 kvm_x86_ops->get_idt(vcpu, &dt); in kvm_arch_vcpu_ioctl_get_sregs()
6937 kvm_x86_ops->get_gdt(vcpu, &dt); in kvm_arch_vcpu_ioctl_get_sregs()
6941 sregs->cr0 = kvm_read_cr0(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6942 sregs->cr2 = vcpu->arch.cr2; in kvm_arch_vcpu_ioctl_get_sregs()
6943 sregs->cr3 = kvm_read_cr3(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6944 sregs->cr4 = kvm_read_cr4(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6945 sregs->cr8 = kvm_get_cr8(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6946 sregs->efer = vcpu->arch.efer; in kvm_arch_vcpu_ioctl_get_sregs()
6947 sregs->apic_base = kvm_get_apic_base(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6951 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) in kvm_arch_vcpu_ioctl_get_sregs()
6952 set_bit(vcpu->arch.interrupt.nr, in kvm_arch_vcpu_ioctl_get_sregs()
6958 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
6961 kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
6962 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
6963 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
6966 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
6971 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
6974 if (!kvm_vcpu_has_lapic(vcpu) && in kvm_arch_vcpu_ioctl_set_mpstate()
6979 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
6980 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
6982 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
6983 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
6987 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, in kvm_task_switch() argument
6990 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_task_switch()
6993 init_emulate_ctxt(vcpu); in kvm_task_switch()
7001 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
7002 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
7003 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_task_switch()
7008 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
7016 if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) in kvm_arch_vcpu_ioctl_set_sregs()
7021 kvm_x86_ops->set_idt(vcpu, &dt); in kvm_arch_vcpu_ioctl_set_sregs()
7024 kvm_x86_ops->set_gdt(vcpu, &dt); in kvm_arch_vcpu_ioctl_set_sregs()
7026 vcpu->arch.cr2 = sregs->cr2; in kvm_arch_vcpu_ioctl_set_sregs()
7027 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in kvm_arch_vcpu_ioctl_set_sregs()
7028 vcpu->arch.cr3 = sregs->cr3; in kvm_arch_vcpu_ioctl_set_sregs()
7029 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_arch_vcpu_ioctl_set_sregs()
7031 kvm_set_cr8(vcpu, sregs->cr8); in kvm_arch_vcpu_ioctl_set_sregs()
7033 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in kvm_arch_vcpu_ioctl_set_sregs()
7034 kvm_x86_ops->set_efer(vcpu, sregs->efer); in kvm_arch_vcpu_ioctl_set_sregs()
7037 kvm_set_apic_base(vcpu, &apic_base_msr); in kvm_arch_vcpu_ioctl_set_sregs()
7039 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in kvm_arch_vcpu_ioctl_set_sregs()
7040 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); in kvm_arch_vcpu_ioctl_set_sregs()
7041 vcpu->arch.cr0 = sregs->cr0; in kvm_arch_vcpu_ioctl_set_sregs()
7043 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in kvm_arch_vcpu_ioctl_set_sregs()
7044 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); in kvm_arch_vcpu_ioctl_set_sregs()
7046 kvm_update_cpuid(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
7048 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_set_sregs()
7049 if (!is_long_mode(vcpu) && is_pae(vcpu)) { in kvm_arch_vcpu_ioctl_set_sregs()
7050 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in kvm_arch_vcpu_ioctl_set_sregs()
7053 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_set_sregs()
7056 kvm_mmu_reset_context(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
7062 kvm_queue_interrupt(vcpu, pending_vec, false); in kvm_arch_vcpu_ioctl_set_sregs()
7066 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in kvm_arch_vcpu_ioctl_set_sregs()
7067 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in kvm_arch_vcpu_ioctl_set_sregs()
7068 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in kvm_arch_vcpu_ioctl_set_sregs()
7069 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in kvm_arch_vcpu_ioctl_set_sregs()
7070 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in kvm_arch_vcpu_ioctl_set_sregs()
7071 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in kvm_arch_vcpu_ioctl_set_sregs()
7073 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in kvm_arch_vcpu_ioctl_set_sregs()
7074 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in kvm_arch_vcpu_ioctl_set_sregs()
7076 update_cr8_intercept(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
7079 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && in kvm_arch_vcpu_ioctl_set_sregs()
7081 !is_protmode(vcpu)) in kvm_arch_vcpu_ioctl_set_sregs()
7082 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_set_sregs()
7084 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
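
Typical VMM bring-up against kvm_arch_vcpu_ioctl_set_sregs(): fetch the reset segment state with KVM_GET_SREGS, flatten CS so a tiny real-mode guest starts executing at a known physical address (RIP is set separately via KVM_SET_REGS), and push it back. Illustrative; error handling omitted.

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int setup_realmode_cs(int vcpu_fd)
{
	struct kvm_sregs sregs;

	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
		return -1;
	sregs.cs.base = 0;
	sregs.cs.selector = 0;
	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}
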
7089 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
7097 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
7100 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
7102 kvm_queue_exception(vcpu, BP_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
7109 rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
7111 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
7112 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
7113 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
7115 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
7117 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
7118 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
7121 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
7123 kvm_update_dr7(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
7125 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
7126 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
7127 get_segment_base(vcpu, VCPU_SREG_CS); in kvm_arch_vcpu_ioctl_set_guest_debug()
7133 kvm_set_rflags(vcpu, rflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
7135 kvm_x86_ops->update_bp_intercept(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
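
Userspace use of the ioctl handled above: single-step the guest and arm one hardware execute breakpoint through the shadow debug registers (eff_db / guest_debug_dr7 in the kernel code). The DR7 value is a minimal illustrative setting; error handling omitted.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int debug_vcpu(int vcpu_fd, uint64_t bp_addr)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE |
			   KVM_GUESTDBG_SINGLESTEP |
			   KVM_GUESTDBG_USE_HW_BP,
	};

	dbg.arch.debugreg[0] = bp_addr;	/* DR0: linear address to trap      */
	dbg.arch.debugreg[7] = 0x1;	/* DR7.L0: enable execute breakpoint 0 */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
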
7147 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
7154 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
7155 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); in kvm_arch_vcpu_ioctl_translate()
7156 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
7165 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
7168 &vcpu->arch.guest_fpu.state.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
7182 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
7185 &vcpu->arch.guest_fpu.state.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
7199 static void fx_init(struct kvm_vcpu *vcpu) in fx_init() argument
7201 fpstate_init(&vcpu->arch.guest_fpu.state); in fx_init()
7203 vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv = in fx_init()
7209 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in fx_init()
7211 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
7214 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) in kvm_load_guest_fpu() argument
7216 if (vcpu->guest_fpu_loaded) in kvm_load_guest_fpu()
7224 vcpu->guest_fpu_loaded = 1; in kvm_load_guest_fpu()
7226 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); in kvm_load_guest_fpu()
7230 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) in kvm_put_guest_fpu() argument
7232 if (!vcpu->guest_fpu_loaded) { in kvm_put_guest_fpu()
7233 vcpu->fpu_counter = 0; in kvm_put_guest_fpu()
7237 vcpu->guest_fpu_loaded = 0; in kvm_put_guest_fpu()
7238 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
7240 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
7247 if (!vcpu->arch.eager_fpu) { in kvm_put_guest_fpu()
7248 if (++vcpu->fpu_counter < 5) in kvm_put_guest_fpu()
7249 kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); in kvm_put_guest_fpu()
7254 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_free() argument
7256 kvmclock_reset(vcpu); in kvm_arch_vcpu_free()
7258 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_free()
7259 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_free()
7265 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
7272 vcpu = kvm_x86_ops->vcpu_create(kvm, id); in kvm_arch_vcpu_create()
7274 return vcpu; in kvm_arch_vcpu_create()
7277 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument
7281 kvm_vcpu_mtrr_init(vcpu); in kvm_arch_vcpu_setup()
7282 r = vcpu_load(vcpu); in kvm_arch_vcpu_setup()
7285 kvm_vcpu_reset(vcpu, false); in kvm_arch_vcpu_setup()
7286 kvm_mmu_setup(vcpu); in kvm_arch_vcpu_setup()
7287 vcpu_put(vcpu); in kvm_arch_vcpu_setup()
7291 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
7294 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
7296 if (vcpu_load(vcpu)) in kvm_arch_vcpu_postcreate()
7301 kvm_write_tsc(vcpu, &msr); in kvm_arch_vcpu_postcreate()
7302 vcpu_put(vcpu); in kvm_arch_vcpu_postcreate()
7311 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
7314 vcpu->arch.apf.msr_val = 0; in kvm_arch_vcpu_destroy()
7316 r = vcpu_load(vcpu); in kvm_arch_vcpu_destroy()
7318 kvm_mmu_unload(vcpu); in kvm_arch_vcpu_destroy()
7319 vcpu_put(vcpu); in kvm_arch_vcpu_destroy()
7321 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
7324 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) in kvm_vcpu_reset() argument
7326 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
7328 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
7329 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
7330 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
7331 kvm_clear_interrupt_queue(vcpu); in kvm_vcpu_reset()
7332 kvm_clear_exception_queue(vcpu); in kvm_vcpu_reset()
7334 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
7335 kvm_update_dr0123(vcpu); in kvm_vcpu_reset()
7336 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
7337 kvm_update_dr6(vcpu); in kvm_vcpu_reset()
7338 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
7339 kvm_update_dr7(vcpu); in kvm_vcpu_reset()
7341 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
7343 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_reset()
7344 vcpu->arch.apf.msr_val = 0; in kvm_vcpu_reset()
7345 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
7347 kvmclock_reset(vcpu); in kvm_vcpu_reset()
7349 kvm_clear_async_pf_completion_queue(vcpu); in kvm_vcpu_reset()
7350 kvm_async_pf_hash_reset(vcpu); in kvm_vcpu_reset()
7351 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
7354 kvm_pmu_reset(vcpu); in kvm_vcpu_reset()
7355 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
7358 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
7359 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
7360 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
7362 kvm_x86_ops->vcpu_reset(vcpu, init_event); in kvm_vcpu_reset()
7365 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) in kvm_vcpu_deliver_sipi_vector() argument
7369 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
7372 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
7373 kvm_rip_write(vcpu, 0); in kvm_vcpu_deliver_sipi_vector()
7379 struct kvm_vcpu *vcpu; in kvm_arch_hardware_enable() local
7394 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
7395 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
7396 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
7397 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
7399 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
7400 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
7447 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
7448 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
7449 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
7450 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
7509 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_reset_bsp() argument
7511 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
7515 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_bsp() argument
7517 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
7520 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) in kvm_vcpu_compatible() argument
7522 return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu); in kvm_vcpu_compatible()
7527 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_init() argument
7533 BUG_ON(vcpu->kvm == NULL); in kvm_arch_vcpu_init()
7534 kvm = vcpu->kvm; in kvm_arch_vcpu_init()
7536 vcpu->arch.pv.pv_unhalted = false; in kvm_arch_vcpu_init()
7537 vcpu->arch.emulate_ctxt.ops = &emulate_ops; in kvm_arch_vcpu_init()
7538 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_init()
7539 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_init()
7541 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_init()
7548 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_init()
7550 kvm_set_tsc_khz(vcpu, max_tsc_khz); in kvm_arch_vcpu_init()
7552 r = kvm_mmu_create(vcpu); in kvm_arch_vcpu_init()
7557 r = kvm_create_lapic(vcpu); in kvm_arch_vcpu_init()
7563 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_init()
7565 if (!vcpu->arch.mce_banks) { in kvm_arch_vcpu_init()
7569 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_init()
7571 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { in kvm_arch_vcpu_init()
7576 fx_init(vcpu); in kvm_arch_vcpu_init()
7578 vcpu->arch.ia32_tsc_adjust_msr = 0x0; in kvm_arch_vcpu_init()
7579 vcpu->arch.pv_time_enabled = false; in kvm_arch_vcpu_init()
7581 vcpu->arch.guest_supported_xcr0 = 0; in kvm_arch_vcpu_init()
7582 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_arch_vcpu_init()
7584 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_init()
7586 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_init()
7588 kvm_async_pf_hash_reset(vcpu); in kvm_arch_vcpu_init()
7589 kvm_pmu_init(vcpu); in kvm_arch_vcpu_init()
7591 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_init()
7596 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_init()
7598 kvm_free_lapic(vcpu); in kvm_arch_vcpu_init()
7600 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_init()
7602 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_init()
7607 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_uninit() argument
7611 kvm_pmu_destroy(vcpu); in kvm_arch_vcpu_uninit()
7612 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_uninit()
7613 kvm_free_lapic(vcpu); in kvm_arch_vcpu_uninit()
7614 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_uninit()
7615 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_uninit()
7616 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_uninit()
7617 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_uninit()
7618 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_uninit()
7622 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_sched_in() argument
7624 kvm_x86_ops->sched_in(vcpu, cpu); in kvm_arch_sched_in()
7656 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) in kvm_unload_vcpu_mmu() argument
7659 r = vcpu_load(vcpu); in kvm_unload_vcpu_mmu()
7661 kvm_mmu_unload(vcpu); in kvm_unload_vcpu_mmu()
7662 vcpu_put(vcpu); in kvm_unload_vcpu_mmu()
7668 struct kvm_vcpu *vcpu; in kvm_free_vcpus() local
7673 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_free_vcpus()
7674 kvm_clear_async_pf_completion_queue(vcpu); in kvm_free_vcpus()
7675 kvm_unload_vcpu_mmu(vcpu); in kvm_free_vcpus()
7677 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
7678 kvm_arch_vcpu_free(vcpu); in kvm_free_vcpus()
7984 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) in kvm_vcpu_has_events() argument
7986 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
7989 if (kvm_apic_has_events(vcpu)) in kvm_vcpu_has_events()
7992 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
7995 if (atomic_read(&vcpu->arch.nmi_queued)) in kvm_vcpu_has_events()
7998 if (test_bit(KVM_REQ_SMI, &vcpu->requests)) in kvm_vcpu_has_events()
8001 if (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_has_events()
8002 kvm_cpu_has_interrupt(vcpu)) in kvm_vcpu_has_events()
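Read together, 7984-8002 list the wake-up sources that end a halted vCPU's wait: completed async page faults, pending APIC events, a paravirtual unhalt, queued NMIs, a pending SMI request, and a deliverable external interrupt. A simplified model of that predicate with each source reduced to a boolean; the struct below is illustrative only, since the real code inspects lists, atomics and request bits:

#include <stdbool.h>

/* Illustrative snapshot of the sources checked by kvm_vcpu_has_events(). */
struct vcpu_events {
        bool async_pf_done;     /* completed async page faults queued */
        bool apic_events;       /* pending INIT/SIPI and similar */
        bool pv_unhalted;       /* paravirtual kick */
        bool nmi_queued;        /* NMIs waiting to be injected */
        bool smi_requested;     /* KVM_REQ_SMI pending */
        bool irq_deliverable;   /* interrupt pending and interrupts allowed */
};

static bool vcpu_has_events(const struct vcpu_events *e)
{
        return e->async_pf_done || e->apic_events || e->pv_unhalted ||
               e->nmi_queued || e->smi_requested || e->irq_deliverable;
}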
8008 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
8010 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) in kvm_arch_vcpu_runnable()
8011 kvm_x86_ops->check_nested_events(vcpu, false); in kvm_arch_vcpu_runnable()
8013 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); in kvm_arch_vcpu_runnable()
8016 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
8018 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
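kvm_arch_vcpu_should_kick() at 8018 sends an IPI only if the target is still IN_GUEST_MODE. kvm_vcpu_exiting_guest_mode() itself is not among the matches, but it is a compare-and-exchange that moves the mode to EXITING_GUEST_MODE, so at most one concurrent kicker wins; treat the sketch below as a model of that idea, written with C11 atomics and illustrative names rather than KVM's:

#include <stdatomic.h>
#include <stdbool.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE, EXITING_GUEST_MODE };

/* Atomically claim the right to kick: only the caller that observes
 * IN_GUEST_MODE and flips it to EXITING_GUEST_MODE returns true; later
 * callers see the updated mode and skip the redundant IPI. */
static bool should_kick(_Atomic int *mode)
{
        int expected = IN_GUEST_MODE;

        return atomic_compare_exchange_strong(mode, &expected,
                                              EXITING_GUEST_MODE);
}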
8021 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) in kvm_arch_interrupt_allowed() argument
8023 return kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_interrupt_allowed()
8026 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) in kvm_get_linear_rip() argument
8028 if (is_64_bit_mode(vcpu)) in kvm_get_linear_rip()
8029 return kvm_rip_read(vcpu); in kvm_get_linear_rip()
8030 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + in kvm_get_linear_rip()
8031 kvm_rip_read(vcpu)); in kvm_get_linear_rip()
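kvm_get_linear_rip() (8028-8031) splits on CPU mode: in 64-bit mode RIP is already a linear address, otherwise the CS segment base is added and the sum is truncated to 32 bits. A self-contained version of that computation; the is_64_bit and cs_base parameters stand in for the vCPU state reads:

#include <stdbool.h>
#include <stdint.h>

/* Linear instruction pointer as computed by the fragment above: 64-bit
 * mode uses RIP directly (flat CS), other modes add the CS base and wrap
 * the result at 32 bits. */
static uint64_t linear_rip(bool is_64_bit, uint64_t cs_base, uint64_t rip)
{
        if (is_64_bit)
                return rip;
        return (uint32_t)(cs_base + rip);
}

kvm_is_linear_rip() at 8037 then reduces single-step bookkeeping to comparing two such values for equality.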
8035 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) in kvm_is_linear_rip() argument
8037 return kvm_get_linear_rip(vcpu) == linear_rip; in kvm_is_linear_rip()
8041 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) in kvm_get_rflags() argument
8045 rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_get_rflags()
8046 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
8052 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in __kvm_set_rflags() argument
8054 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
8055 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
8057 kvm_x86_ops->set_rflags(vcpu, rflags); in __kvm_set_rflags()
8060 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in kvm_set_rflags() argument
8062 __kvm_set_rflags(vcpu, rflags); in kvm_set_rflags()
8063 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_set_rflags()
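The rflags accessors (8041-8063) cooperate with userspace single-stepping: the elided branches around 8046 and 8054-8056 hide the trap flag that KVM planted from readers and re-arm it on writes that land on the instruction being stepped. A simplified round-trip model of that behaviour, assuming the architectural TF encoding (bit 8 of EFLAGS); it sketches the intent of the elided lines rather than reproducing them:

#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_TF   (1UL << 8)      /* trap flag */

struct dbg_state {
        bool single_stepping;           /* userspace requested single-step */
        uint64_t singlestep_rip;        /* linear RIP where TF was armed */
};

/* Reads: hide the host-planted TF so the guest never observes it. */
static uint64_t dbg_get_rflags(const struct dbg_state *d, uint64_t hw_rflags)
{
        if (d->single_stepping)
                hw_rflags &= ~X86_EFLAGS_TF;
        return hw_rflags;
}

/* Writes: keep TF set while still sitting on the recorded instruction. */
static uint64_t dbg_set_rflags(const struct dbg_state *d, uint64_t new_rflags,
                               uint64_t current_linear_rip)
{
        if (d->single_stepping && current_linear_rip == d->singlestep_rip)
                new_rflags |= X86_EFLAGS_TF;
        return new_rflags;
}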
8067 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) in kvm_arch_async_page_ready() argument
8071 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
8075 r = kvm_mmu_reload(vcpu); in kvm_arch_async_page_ready()
8079 if (!vcpu->arch.mmu.direct_map && in kvm_arch_async_page_ready()
8080 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) in kvm_arch_async_page_ready()
8083 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); in kvm_arch_async_page_ready()
8096 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
8100 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
8103 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
8106 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot() argument
8112 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
8113 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
8119 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn() argument
8121 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
8124 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn() argument
8128 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); in kvm_del_async_pf_gfn()
8130 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
8133 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
8135 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
8142 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
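Lines 8096-8142 form a small open-addressed hash of outstanding async-page-fault gfns: insertion probes linearly for a free slot (~0 marks empty), lookup probes until it finds the gfn or hits an empty slot, and deletion re-homes later entries in the probe chain so no lookup ever stops at a freshly created hole. A self-contained model of the same scheme; the table size, the hash function and every apf_* name below are illustrative choices, not KVM's:

#include <stdbool.h>
#include <stdint.h>

#define APF_SLOTS 64u                    /* power of two, like the original */
#define APF_EMPTY (~0ULL)                /* ~0 marks a free slot */

static uint64_t apf_gfns[APF_SLOTS];

static void apf_reset(void)
{
        for (uint32_t i = 0; i < APF_SLOTS; i++)
                apf_gfns[i] = APF_EMPTY;        /* must run before any insert */
}

static uint32_t apf_hash(uint64_t gfn)
{
        /* Illustrative multiplicative hash into [0, APF_SLOTS). */
        return (uint32_t)((gfn * 0x9E3779B97F4A7C15ULL) >> 58) & (APF_SLOTS - 1);
}

static uint32_t apf_next(uint32_t key)
{
        return (key + 1) & (APF_SLOTS - 1);
}

/* Insert: linear probe to the first empty slot. */
static void apf_add(uint64_t gfn)
{
        uint32_t key = apf_hash(gfn);

        while (apf_gfns[key] != APF_EMPTY)
                key = apf_next(key);
        apf_gfns[key] = gfn;
}

/* Lookup: probe until the gfn or an empty slot ends the chain. */
static uint32_t apf_slot(uint64_t gfn)
{
        uint32_t key = apf_hash(gfn);

        for (uint32_t i = 0; i < APF_SLOTS &&
                        apf_gfns[key] != gfn && apf_gfns[key] != APF_EMPTY; i++)
                key = apf_next(key);
        return key;
}

static bool apf_find(uint64_t gfn)
{
        return apf_gfns[apf_slot(gfn)] == gfn;
}

/*
 * Delete: clear the slot, then walk the remainder of the probe chain and
 * pull back any entry whose home bucket lies cyclically outside (i, j],
 * mirroring the shuffle visible at 8128-8142.
 */
static void apf_del(uint64_t gfn)
{
        uint32_t i, j, k;

        i = j = apf_slot(gfn);
        while (true) {
                apf_gfns[i] = APF_EMPTY;
                do {
                        j = apf_next(j);
                        if (apf_gfns[j] == APF_EMPTY)
                                return;
                        k = apf_hash(apf_gfns[j]);
                } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
                apf_gfns[i] = apf_gfns[j];
                i = j;
        }
}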
8147 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) in apf_put_user() argument
8150 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, in apf_put_user()
8154 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
8160 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
8162 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || in kvm_arch_async_page_not_present()
8163 (vcpu->arch.apf.send_user_only && in kvm_arch_async_page_not_present()
8164 kvm_x86_ops->get_cpl(vcpu) == 0)) in kvm_arch_async_page_not_present()
8165 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in kvm_arch_async_page_not_present()
8166 else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { in kvm_arch_async_page_not_present()
8172 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_not_present()
8176 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
8185 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
8187 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && in kvm_arch_async_page_present()
8188 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { in kvm_arch_async_page_present()
8194 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_present()
8196 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
8197 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
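Lines 8154-8197 are the host half of the paravirtual async-page-fault protocol: when a page is missing, the host either halts the vCPU or writes a "page not present" reason into the guest-registered word and injects a #PF; when the page arrives, it writes "page ready" the same way before marking the vCPU runnable again (8196-8197). As a rough guest-side counterpart, the #PF handler reads and clears that word to tell real faults from notifications; the sketch below assumes the documented reason values (1 = not present, 2 = ready) and abstracts the shared per-CPU area into a plain pointer, so the names and types are illustrative:

#include <stdint.h>

#define PV_REASON_NONE              0u
#define PV_REASON_PAGE_NOT_PRESENT  1u
#define PV_REASON_PAGE_READY        2u

enum apf_action { APF_REAL_FAULT, APF_WAIT_FOR_PAGE, APF_WAKE_WAITER };

/* Simplified guest-side dispatch: consume the shared reason word and decide
 * whether this #PF is a genuine fault or an async-PF notification. */
static enum apf_action classify_pf(volatile uint32_t *reason_word)
{
        uint32_t reason = *reason_word;

        *reason_word = PV_REASON_NONE;          /* consume the event */

        switch (reason) {
        case PV_REASON_PAGE_NOT_PRESENT:
                return APF_WAIT_FOR_PAGE;       /* host is paging it in */
        case PV_REASON_PAGE_READY:
                return APF_WAKE_WAITER;         /* a waiter can resume */
        default:
                return APF_REAL_FAULT;          /* ordinary page fault */
        }
}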
8200 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_inject_async_page_present() argument
8202 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) in kvm_arch_can_inject_async_page_present()
8205 return !kvm_event_needs_reinjection(vcpu) && in kvm_arch_can_inject_async_page_present()
8206 kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_can_inject_async_page_present()