Lines matching refs: vcpu (cross-reference of the vcpu identifier in the KVM x86 core, arch/x86/kvm/x86.c; each entry gives the source line number, the matching line, and the enclosing function)
89 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
90 static void process_nmi(struct kvm_vcpu *vcpu);
91 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
177 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
181 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
269 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) in kvm_get_apic_base() argument
271 return vcpu->arch.apic_base; in kvm_get_apic_base()
275 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_apic_base() argument
277 u64 old_state = vcpu->arch.apic_base & in kvm_set_apic_base()
281 u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | in kvm_set_apic_base()
282 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); in kvm_set_apic_base()
293 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
351 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, in kvm_multiple_exception() argument
358 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_multiple_exception()
360 if (!vcpu->arch.exception.pending) { in kvm_multiple_exception()
362 if (has_error && !is_protmode(vcpu)) in kvm_multiple_exception()
364 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
365 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
366 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
367 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
368 vcpu->arch.exception.reinject = reinject; in kvm_multiple_exception()
373 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
376 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_multiple_exception()
384 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
385 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
386 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
387 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
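
The kvm_multiple_exception() hits above (source lines 351-387) show only the bookkeeping, with the surrounding tests elided by the search: the first exception is simply recorded, a colliding second one is promoted to a double fault with error code 0, and a collision while #DF is already pending escalates to KVM_REQ_TRIPLE_FAULT. Below is a minimal, self-contained sketch of that visible policy; the struct is a simplified stand-in for vcpu->arch.exception, and the contributory-class test the real function applies before promoting to #DF is reduced to a comment.

    #include <stdbool.h>
    #include <stdio.h>

    #define DF_VECTOR 8

    struct pending_exception {
        bool pending;
        bool has_error_code;
        unsigned nr;
        unsigned error_code;
    };

    /* Returns true when the collision should become a triple fault. */
    static bool queue_exception(struct pending_exception *e, unsigned nr,
                                bool has_error, unsigned error_code)
    {
        if (!e->pending) {
            e->pending = true;
            e->has_error_code = has_error;
            e->nr = nr;
            e->error_code = error_code;
            return false;
        }
        if (e->nr == DF_VECTOR)         /* new fault while delivering #DF */
            return true;                /* -> KVM_REQ_TRIPLE_FAULT */
        /*
         * The real code promotes to #DF only for contributory/page-fault
         * combinations (SDM 6.15); that classification is omitted here.
         */
        e->has_error_code = true;
        e->nr = DF_VECTOR;
        e->error_code = 0;
        return false;
    }

    int main(void)
    {
        struct pending_exception e = {0};

        queue_exception(&e, 14, true, 2);   /* #PF */
        queue_exception(&e, 13, true, 0);   /* #GP while #PF still pending */
        printf("pending vector %u (8 == #DF)\n", e.nr);
        return 0;
    }
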
395 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_queue_exception() argument
397 kvm_multiple_exception(vcpu, nr, false, 0, false); in kvm_queue_exception()
401 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_requeue_exception() argument
403 kvm_multiple_exception(vcpu, nr, false, 0, true); in kvm_requeue_exception()
407 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) in kvm_complete_insn_gp() argument
410 kvm_inject_gp(vcpu, 0); in kvm_complete_insn_gp()
412 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_complete_insn_gp()
416 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_inject_page_fault() argument
418 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
419 vcpu->arch.cr2 = fault->address; in kvm_inject_page_fault()
420 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
424 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_propagate_fault() argument
426 if (mmu_is_nested(vcpu) && !fault->nested_page_fault) in kvm_propagate_fault()
427 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
429 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
434 void kvm_inject_nmi(struct kvm_vcpu *vcpu) in kvm_inject_nmi() argument
436 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
437 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_inject_nmi()
441 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_queue_exception_e() argument
443 kvm_multiple_exception(vcpu, nr, true, error_code, false); in kvm_queue_exception_e()
447 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_requeue_exception_e() argument
449 kvm_multiple_exception(vcpu, nr, true, error_code, true); in kvm_requeue_exception_e()
457 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) in kvm_require_cpl() argument
459 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
461 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in kvm_require_cpl()
466 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) in kvm_require_dr() argument
468 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE)) in kvm_require_dr()
471 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_require_dr()
481 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in kvm_read_guest_page_mmu() argument
490 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu()
496 return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); in kvm_read_guest_page_mmu()
500 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_read_nested_guest_page() argument
503 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
510 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) in load_pdptrs() argument
518 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, in load_pdptrs()
527 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { in load_pdptrs()
536 (unsigned long *)&vcpu->arch.regs_avail); in load_pdptrs()
538 (unsigned long *)&vcpu->arch.regs_dirty); in load_pdptrs()
545 static bool pdptrs_changed(struct kvm_vcpu *vcpu) in pdptrs_changed() argument
547 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
553 if (is_long_mode(vcpu) || !is_pae(vcpu)) in pdptrs_changed()
557 (unsigned long *)&vcpu->arch.regs_avail)) in pdptrs_changed()
560 gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; in pdptrs_changed()
561 offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); in pdptrs_changed()
562 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), in pdptrs_changed()
566 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
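
load_pdptrs() and pdptrs_changed() above (source lines 510-566) read the four PAE page-directory-pointer entries from the 32-byte-aligned block addressed by CR3, cache them in the walk MMU, and later re-read and memcmp() them to decide whether a CR3 reload can be skipped. The sketch below models only the address arithmetic and the comparison; fake_guest_read() is a stand-in for kvm_read_nested_guest_page(), and the present/reserved-bit validation done by the real load_pdptrs() is left out.

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1u << PAGE_SHIFT)

    /* stand-in for kvm_read_nested_guest_page(); returns 0 on success */
    static int fake_guest_read(uint64_t gfn, unsigned offset, void *data, unsigned len)
    {
        static uint8_t guest_page[PAGE_SIZE];   /* pretend guest memory */

        (void)gfn;
        memcpy(data, guest_page + offset, len);
        return 0;
    }

    static uint64_t cached_pdptrs[4];           /* walk_mmu->pdptrs */

    static bool pdptrs_changed(uint64_t cr3)
    {
        uint64_t pdpte[4];
        uint64_t base   = cr3 & ~(uint64_t)31;  /* PDPTEs sit on a 32-byte boundary */
        uint64_t gfn    = base >> PAGE_SHIFT;
        unsigned offset = base & (PAGE_SIZE - 1);

        if (fake_guest_read(gfn, offset, pdpte, sizeof(pdpte)))
            return true;                        /* unreadable: treat as changed */
        return memcmp(pdpte, cached_pdptrs, sizeof(pdpte)) != 0;
    }
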
572 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_set_cr0() argument
574 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_set_cr0()
593 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { in kvm_set_cr0()
595 if ((vcpu->arch.efer & EFER_LME)) { in kvm_set_cr0()
598 if (!is_pae(vcpu)) in kvm_set_cr0()
600 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
605 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr0()
606 kvm_read_cr3(vcpu))) in kvm_set_cr0()
610 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) in kvm_set_cr0()
613 kvm_x86_ops->set_cr0(vcpu, cr0); in kvm_set_cr0()
616 kvm_clear_async_pf_completion_queue(vcpu); in kvm_set_cr0()
617 kvm_async_pf_hash_reset(vcpu); in kvm_set_cr0()
621 kvm_mmu_reset_context(vcpu); in kvm_set_cr0()
626 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) in kvm_lmsw() argument
628 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); in kvm_lmsw()
632 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) in kvm_load_guest_xcr0() argument
634 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && in kvm_load_guest_xcr0()
635 !vcpu->guest_xcr0_loaded) { in kvm_load_guest_xcr0()
637 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xcr0()
638 vcpu->guest_xcr0_loaded = 1; in kvm_load_guest_xcr0()
642 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) in kvm_put_guest_xcr0() argument
644 if (vcpu->guest_xcr0_loaded) { in kvm_put_guest_xcr0()
645 if (vcpu->arch.xcr0 != host_xcr0) in kvm_put_guest_xcr0()
647 vcpu->guest_xcr0_loaded = 0; in kvm_put_guest_xcr0()
651 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in __kvm_set_xcr() argument
654 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
670 valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; in __kvm_set_xcr()
683 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
686 kvm_update_cpuid(vcpu); in __kvm_set_xcr()
690 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in kvm_set_xcr() argument
692 if (kvm_x86_ops->get_cpl(vcpu) != 0 || in kvm_set_xcr()
693 __kvm_set_xcr(vcpu, index, xcr)) { in kvm_set_xcr()
694 kvm_inject_gp(vcpu, 0); in kvm_set_xcr()
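
__kvm_set_xcr() and kvm_set_xcr() above (source lines 651-694) validate a guest XSETBV: the allowed mask is guest_supported_xcr0 plus the always-mandatory x87 bit, the write must come from CPL 0, and any failure is turned into #GP(0). The model below keeps those rules plus the XCR-index and x87 checks the real function also applies; its remaining consistency rules (for example the SSE/YMM pairing requirement) are omitted, and the struct is a simplified stand-in for the vCPU state.

    #include <stdint.h>
    #include <stdbool.h>

    #define XSTATE_FP (1ull << 0)   /* x87 state can never be disabled */

    struct xcr_model {
        int      cpl;
        uint64_t guest_supported_xcr0;
        uint64_t xcr0;
    };

    /* Returns 0 on success; non-zero means the caller injects #GP(0). */
    static int set_xcr(struct xcr_model *v, uint32_t index, uint64_t xcr)
    {
        uint64_t valid_bits = v->guest_supported_xcr0 | XSTATE_FP;

        if (v->cpl != 0)            /* XSETBV is privileged */
            return 1;
        if (index != 0)             /* only XCR0 exists */
            return 1;
        if (!(xcr & XSTATE_FP) || (xcr & ~valid_bits))
            return 1;
        v->xcr0 = xcr;              /* the real code then calls kvm_update_cpuid() */
        return 0;
    }
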
701 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_set_cr4() argument
703 unsigned long old_cr4 = kvm_read_cr4(vcpu); in kvm_set_cr4()
710 if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE)) in kvm_set_cr4()
713 if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP)) in kvm_set_cr4()
716 if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP)) in kvm_set_cr4()
719 if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE)) in kvm_set_cr4()
722 if (is_long_mode(vcpu)) { in kvm_set_cr4()
725 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) in kvm_set_cr4()
727 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
728 kvm_read_cr3(vcpu))) in kvm_set_cr4()
732 if (!guest_cpuid_has_pcid(vcpu)) in kvm_set_cr4()
736 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) in kvm_set_cr4()
740 if (kvm_x86_ops->set_cr4(vcpu, cr4)) in kvm_set_cr4()
745 kvm_mmu_reset_context(vcpu); in kvm_set_cr4()
748 kvm_update_cpuid(vcpu); in kvm_set_cr4()
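
The kvm_set_cr4() hits above (source lines 701-748) show the gating pattern used for CR4: a bit that the guest's CPUID does not advertise (OSXSAVE, SMEP, SMAP, FSGSBASE, PCIDE) cannot be set and results in #GP, long mode and PAE paging impose extra PDPTE reloads, and the MMU context and CPUID-derived state are refreshed afterwards. A condensed sketch of the CPUID gating step, with the feature flags passed in as plain booleans instead of CPUID lookups:

    #include <stdbool.h>
    #include <stdint.h>

    /* CR4 bit positions, as architecturally defined */
    #define CR4_FSGSBASE (1ull << 16)
    #define CR4_PCIDE    (1ull << 17)
    #define CR4_OSXSAVE  (1ull << 18)
    #define CR4_SMEP     (1ull << 20)
    #define CR4_SMAP     (1ull << 21)

    struct guest_features {
        bool xsave, smep, smap, fsgsbase, pcid;
    };

    /* Returns 0 if the requested CR4 only uses advertised features, 1 -> #GP. */
    static int check_cr4_against_cpuid(uint64_t cr4, const struct guest_features *f)
    {
        if (!f->xsave && (cr4 & CR4_OSXSAVE))
            return 1;
        if (!f->smep && (cr4 & CR4_SMEP))
            return 1;
        if (!f->smap && (cr4 & CR4_SMAP))
            return 1;
        if (!f->fsgsbase && (cr4 & CR4_FSGSBASE))
            return 1;
        if (!f->pcid && (cr4 & CR4_PCIDE))
            return 1;
        return 0;
    }
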
754 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in kvm_set_cr3() argument
760 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { in kvm_set_cr3()
761 kvm_mmu_sync_roots(vcpu); in kvm_set_cr3()
762 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_set_cr3()
766 if (is_long_mode(vcpu)) { in kvm_set_cr3()
769 } else if (is_pae(vcpu) && is_paging(vcpu) && in kvm_set_cr3()
770 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
773 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
774 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_set_cr3()
775 kvm_mmu_new_cr3(vcpu); in kvm_set_cr3()
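
kvm_set_cr3() above (source lines 754-775) takes a fast path when the guest rewrites CR3 with its current value and the cached PDPTEs are unchanged: it only resynchronizes the shadow roots and requests a TLB flush instead of switching to a new root via kvm_mmu_new_cr3(). The decision sketch below passes the intermediate results in as booleans; the long-mode reserved-bit check, which the search elided, is not modelled.

    #include <stdint.h>
    #include <stdbool.h>

    enum cr3_action {
        CR3_FAST_SYNC_AND_FLUSH,    /* kvm_mmu_sync_roots() + TLB flush */
        CR3_FULL_SWITCH,            /* record CR3, kvm_mmu_new_cr3() */
        CR3_FAULT                   /* caller injects #GP */
    };

    static enum cr3_action set_cr3(uint64_t *current_cr3, uint64_t new_cr3,
                                   bool pdptrs_unchanged, bool long_mode,
                                   bool pae_paging, bool pdptrs_load_ok)
    {
        if (new_cr3 == *current_cr3 && pdptrs_unchanged)
            return CR3_FAST_SYNC_AND_FLUSH;

        if (!long_mode && pae_paging && !pdptrs_load_ok)
            return CR3_FAULT;

        *current_cr3 = new_cr3;     /* plus marking VCPU_EXREG_CR3 available */
        return CR3_FULL_SWITCH;
    }
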
780 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) in kvm_set_cr8() argument
784 if (irqchip_in_kernel(vcpu->kvm)) in kvm_set_cr8()
785 kvm_lapic_set_tpr(vcpu, cr8); in kvm_set_cr8()
787 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
792 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) in kvm_get_cr8() argument
794 if (irqchip_in_kernel(vcpu->kvm)) in kvm_get_cr8()
795 return kvm_lapic_get_cr8(vcpu); in kvm_get_cr8()
797 return vcpu->arch.cr8; in kvm_get_cr8()
801 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) in kvm_update_dr0123() argument
805 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
807 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
808 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
812 static void kvm_update_dr6(struct kvm_vcpu *vcpu) in kvm_update_dr6() argument
814 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in kvm_update_dr6()
815 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); in kvm_update_dr6()
818 static void kvm_update_dr7(struct kvm_vcpu *vcpu) in kvm_update_dr7() argument
822 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
823 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
825 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
826 kvm_x86_ops->set_dr7(vcpu, dr7); in kvm_update_dr7()
827 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
829 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
832 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) in kvm_dr6_fixed() argument
836 if (!guest_cpuid_has_rtm(vcpu)) in kvm_dr6_fixed()
841 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in __kvm_set_dr() argument
845 vcpu->arch.db[dr] = val; in __kvm_set_dr()
846 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in __kvm_set_dr()
847 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
854 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
855 kvm_update_dr6(vcpu); in __kvm_set_dr()
862 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
863 kvm_update_dr7(vcpu); in __kvm_set_dr()
870 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in kvm_set_dr() argument
872 if (__kvm_set_dr(vcpu, dr, val)) { in kvm_set_dr()
873 kvm_inject_gp(vcpu, 0); in kvm_set_dr()
880 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) in kvm_get_dr() argument
884 *val = vcpu->arch.db[dr]; in kvm_get_dr()
889 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_get_dr()
890 *val = vcpu->arch.dr6; in kvm_get_dr()
892 *val = kvm_x86_ops->get_dr6(vcpu); in kvm_get_dr()
897 *val = vcpu->arch.dr7; in kvm_get_dr()
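
The __kvm_set_dr()/kvm_get_dr() hits above (source lines 832-897) follow the standard debug-register pattern: only the architecturally writable ("volatile") bits of DR6/DR7 are taken from the guest value, the bits that always read as 1 are ORed back in, and for DR6 the RTM bit additionally reads as 1 whenever the guest's CPUID lacks RTM (kvm_dr6_fixed() above). The masking is shown below; the mask constants follow the usual KVM definitions and are restated here as assumptions.

    #include <stdint.h>
    #include <stdbool.h>

    #define DR6_RTM       (1ull << 16)
    #define DR6_FIXED_1   0xfffe0ff0ull     /* always-one bits, RTM bit excluded */
    #define DR6_VOLATILE  0x0001e00full     /* B0-B3, BD, BS, BT, RTM */
    #define DR7_FIXED_1   0x00000400ull
    #define DR7_VOLATILE  0xffff2bffull

    static uint64_t dr6_fixed(bool guest_has_rtm)
    {
        uint64_t fixed = DR6_FIXED_1;

        if (!guest_has_rtm)
            fixed |= DR6_RTM;       /* without RTM, bit 16 reads as 1 */
        return fixed;
    }

    static uint64_t guest_dr6(uint64_t written_val, bool guest_has_rtm)
    {
        return (written_val & DR6_VOLATILE) | dr6_fixed(guest_has_rtm);
    }

    static uint64_t guest_dr7(uint64_t written_val)
    {
        return (written_val & DR7_VOLATILE) | DR7_FIXED_1;
    }
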
904 bool kvm_rdpmc(struct kvm_vcpu *vcpu) in kvm_rdpmc() argument
906 u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_rdpmc()
910 err = kvm_pmu_read_pmc(vcpu, ecx, &data); in kvm_rdpmc()
913 kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); in kvm_rdpmc()
914 kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); in kvm_rdpmc()
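
kvm_rdpmc() above (source lines 904-914) mirrors the hardware RDPMC convention: the counter index comes from ECX and the 64-bit result is returned split across RAX (low half) and RDX (high half). Trivially, as a helper:

    #include <stdint.h>

    static void split_pmc_result(uint64_t data, uint32_t *eax, uint32_t *edx)
    {
        *eax = (uint32_t)data;          /* low 32 bits  -> RAX */
        *edx = (uint32_t)(data >> 32);  /* high 32 bits -> RDX */
    }
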
955 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in kvm_valid_efer() argument
963 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); in kvm_valid_efer()
971 feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); in kvm_valid_efer()
980 static int set_efer(struct kvm_vcpu *vcpu, u64 efer) in set_efer() argument
982 u64 old_efer = vcpu->arch.efer; in set_efer()
984 if (!kvm_valid_efer(vcpu, efer)) in set_efer()
987 if (is_paging(vcpu) in set_efer()
988 && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
992 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
994 kvm_x86_ops->set_efer(vcpu, efer); in set_efer()
998 kvm_mmu_reset_context(vcpu); in set_efer()
1014 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_set_msr() argument
1041 return kvm_x86_ops->set_msr(vcpu, msr); in kvm_set_msr()
1048 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_set_msr() argument
1055 return kvm_set_msr(vcpu, &msr); in do_set_msr()
1099 void kvm_set_pending_timer(struct kvm_vcpu *vcpu) in kvm_set_pending_timer() argument
1106 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); in kvm_set_pending_timer()
1207 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) in nsec_to_cycles() argument
1209 return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, in nsec_to_cycles()
1210 vcpu->arch.virtual_tsc_shift); in nsec_to_cycles()
1220 static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) in kvm_set_tsc_khz() argument
1231 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
1232 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
1233 vcpu->arch.virtual_tsc_khz = this_tsc_khz; in kvm_set_tsc_khz()
1247 kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling); in kvm_set_tsc_khz()
1250 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) in compute_guest_tsc() argument
1252 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
1253 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
1254 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
1255 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
1259 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) in kvm_track_tsc_matching() argument
1263 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
1267 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
1279 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_track_tsc_matching()
1281 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
1282 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
1287 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) in update_ia32_tsc_adjust_msr() argument
1289 u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); in update_ia32_tsc_adjust_msr()
1290 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; in update_ia32_tsc_adjust_msr()
1293 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_write_tsc() argument
1295 struct kvm *kvm = vcpu->kvm; in kvm_write_tsc()
1304 offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); in kvm_write_tsc()
1308 if (vcpu->arch.virtual_tsc_khz) { in kvm_write_tsc()
1314 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1329 : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); in kvm_write_tsc()
1354 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_write_tsc()
1359 u64 delta = nsec_to_cycles(vcpu, elapsed); in kvm_write_tsc()
1361 offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); in kvm_write_tsc()
1365 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_write_tsc()
1391 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1393 vcpu->arch.last_guest_tsc = data; in kvm_write_tsc()
1396 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_write_tsc()
1397 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_write_tsc()
1398 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_write_tsc()
1400 if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) in kvm_write_tsc()
1401 update_ia32_tsc_adjust_msr(vcpu, offset); in kvm_write_tsc()
1402 kvm_x86_ops->write_tsc_offset(vcpu, offset); in kvm_write_tsc()
1412 kvm_track_tsc_matching(vcpu); in kvm_write_tsc()
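
kvm_write_tsc() above (source lines 1293-1412) decides whether a guest TSC write is close enough to the previous one to be treated as an attempt at cross-vCPU synchronization. The unit conversions it relies on are visible in the matches: a tick difference becomes microseconds by multiplying by 1000 and dividing by virtual_tsc_khz (the inline asm at source line 1329 is just that divide), while nsec_to_cycles() converts the other way through the pvclock mult/shift pair. The helper below restates the tick-to-microsecond step in plain C, assuming the elided lines have already formed the absolute tick difference.

    #include <stdint.h>

    /*
     * virtual_tsc_khz is ticks per millisecond, so
     * ticks / khz = milliseconds and ticks * 1000 / khz = microseconds.
     * Overflow for absurdly large differences is ignored here; the caller
     * only cares about small ones.
     */
    static uint64_t tsc_ticks_to_usecs(uint64_t tick_diff, uint32_t virtual_tsc_khz)
    {
        return (tick_diff * 1000) / virtual_tsc_khz;
    }
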
1570 struct kvm_vcpu *vcpu; in kvm_gen_update_masterclock() local
1578 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
1579 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_gen_update_masterclock()
1582 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
1583 clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); in kvm_gen_update_masterclock()
1592 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update() local
1640 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
1650 if (!vcpu->pv_time_enabled) in kvm_guest_time_update()
1653 if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { in kvm_guest_time_update()
1655 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
1656 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
1657 vcpu->hw_tsc_khz = this_tsc_khz; in kvm_guest_time_update()
1661 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
1662 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
1663 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
1665 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1685 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_guest_time_update()
1686 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1687 &vcpu->hv_clock, in kvm_guest_time_update()
1688 sizeof(vcpu->hv_clock.version)); in kvm_guest_time_update()
1695 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_guest_time_update()
1697 vcpu->pvclock_set_guest_stopped_request = false; in kvm_guest_time_update()
1704 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
1706 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_guest_time_update()
1708 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1709 &vcpu->hv_clock, in kvm_guest_time_update()
1710 sizeof(vcpu->hv_clock)); in kvm_guest_time_update()
1714 vcpu->hv_clock.version++; in kvm_guest_time_update()
1715 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_guest_time_update()
1716 &vcpu->hv_clock, in kvm_guest_time_update()
1717 sizeof(vcpu->hv_clock.version)); in kvm_guest_time_update()
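
kvm_guest_time_update() above (source lines 1592-1717) publishes the per-vCPU pvclock page with a seqlock-style version field: the version is bumped to an odd value before the payload is copied out (source lines 1685-1688) and bumped again to an even value afterwards (source lines 1714-1717), so the guest can detect a torn read by re-checking it. A minimal writer/reader pair illustrating the same protocol; the struct layout is abbreviated and the memory barriers of the real code are only noted in comments.

    #include <stdint.h>

    struct pvclock_model {              /* abbreviated pvclock_vcpu_time_info */
        uint32_t version;               /* odd while the host is updating */
        uint64_t tsc_timestamp;
        uint64_t system_time;
    };

    /* Host side: bump to odd, fill in the payload, bump back to even. */
    static void host_publish(struct pvclock_model *shared, uint64_t tsc, uint64_t ns)
    {
        shared->version++;              /* now odd: update in progress */
        /* (a write barrier sits here in the real code) */
        shared->tsc_timestamp = tsc;
        shared->system_time = ns;
        /* (another barrier) */
        shared->version++;              /* even again: snapshot is consistent */
    }

    /* Guest side: retry until an even, unchanged version is observed. */
    static struct pvclock_model guest_read(const struct pvclock_model *shared)
    {
        struct pvclock_model snap;

        do {
            snap = *shared;             /* barriers/volatile omitted in this sketch */
        } while ((snap.version & 1) || snap.version != shared->version);
        return snap;
    }
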
1744 struct kvm_vcpu *vcpu; in kvmclock_update_fn() local
1746 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
1747 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_update_fn()
1748 kvm_vcpu_kick(vcpu); in kvmclock_update_fn()
1809 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) in kvm_mtrr_valid() argument
1836 mask = (~0ULL) << cpuid_maxphyaddr(vcpu); in kvm_mtrr_valid()
1846 kvm_inject_gp(vcpu, 0); in kvm_mtrr_valid()
1854 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_mtrr() argument
1856 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; in set_msr_mtrr()
1858 if (!kvm_mtrr_valid(vcpu, msr, data)) in set_msr_mtrr()
1862 vcpu->arch.mtrr_state.def_type = data; in set_msr_mtrr()
1863 vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; in set_msr_mtrr()
1871 vcpu->arch.pat = data; in set_msr_mtrr()
1880 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; in set_msr_mtrr()
1883 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; in set_msr_mtrr()
1887 kvm_mmu_reset_context(vcpu); in set_msr_mtrr()
1891 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_mce() argument
1893 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
1898 vcpu->arch.mcg_status = data; in set_msr_mce()
1905 vcpu->arch.mcg_ctl = data; in set_msr_mce()
1919 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
1927 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) in xen_hvm_config() argument
1929 struct kvm *kvm = vcpu->kvm; in xen_hvm_config()
1930 int lm = is_long_mode(vcpu); in xen_hvm_config()
1978 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_hyperv_pw() argument
1980 struct kvm *kvm = vcpu->kvm; in set_msr_hyperv_pw()
2005 kvm_x86_ops->patch_hypercall(vcpu, instructions); in set_msr_hyperv_pw()
2028 vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " in set_msr_hyperv_pw()
2035 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) in set_msr_hyperv() argument
2043 vcpu->arch.hv_vapic = data; in set_msr_hyperv()
2044 if (kvm_lapic_enable_pv_eoi(vcpu, 0)) in set_msr_hyperv()
2049 addr = gfn_to_hva(vcpu->kvm, gfn); in set_msr_hyperv()
2054 vcpu->arch.hv_vapic = data; in set_msr_hyperv()
2055 mark_page_dirty(vcpu->kvm, gfn); in set_msr_hyperv()
2056 if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED)) in set_msr_hyperv()
2061 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); in set_msr_hyperv()
2063 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); in set_msr_hyperv()
2065 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); in set_msr_hyperv()
2067 vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " in set_msr_hyperv()
2075 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf() argument
2083 vcpu->arch.apf.msr_val = data; in kvm_pv_enable_async_pf()
2086 kvm_clear_async_pf_completion_queue(vcpu); in kvm_pv_enable_async_pf()
2087 kvm_async_pf_hash_reset(vcpu); in kvm_pv_enable_async_pf()
2091 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2095 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
2096 kvm_async_pf_wakeup_all(vcpu); in kvm_pv_enable_async_pf()
2100 static void kvmclock_reset(struct kvm_vcpu *vcpu) in kvmclock_reset() argument
2102 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
2105 static void accumulate_steal_time(struct kvm_vcpu *vcpu) in accumulate_steal_time() argument
2109 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in accumulate_steal_time()
2112 delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; in accumulate_steal_time()
2113 vcpu->arch.st.last_steal = current->sched_info.run_delay; in accumulate_steal_time()
2114 vcpu->arch.st.accum_steal = delta; in accumulate_steal_time()
2117 static void record_steal_time(struct kvm_vcpu *vcpu) in record_steal_time() argument
2119 accumulate_steal_time(vcpu); in record_steal_time()
2121 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
2124 if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2125 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) in record_steal_time()
2128 vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; in record_steal_time()
2129 vcpu->arch.st.steal.version += 2; in record_steal_time()
2130 vcpu->arch.st.accum_steal = 0; in record_steal_time()
2132 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, in record_steal_time()
2133 &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); in record_steal_time()
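
accumulate_steal_time() and record_steal_time() above (source lines 2105-2133) track how long the vCPU thread sat runnable but not running (the scheduler's run_delay), add that delta to the steal counter in the guest's registered steal-time page, and bump the page's version by 2 before writing it back. A compressed model of the accounting, with a simplified stand-in struct:

    #include <stdint.h>

    struct steal_model {
        uint64_t last_run_delay;    /* snapshot of current->sched_info.run_delay */
        uint64_t steal;             /* nanoseconds reported to the guest */
        uint32_t version;
    };

    static void record_steal(struct steal_model *st, uint64_t run_delay_now)
    {
        uint64_t delta = run_delay_now - st->last_run_delay;

        st->last_run_delay = run_delay_now;
        st->steal += delta;         /* time spent runnable but preempted */
        st->version += 2;           /* kept even; then written back to the guest page */
    }
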
2136 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_msr_common() argument
2152 return set_efer(vcpu, data); in kvm_set_msr_common()
2159 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", in kvm_set_msr_common()
2166 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " in kvm_set_msr_common()
2180 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", in kvm_set_msr_common()
2184 return set_msr_mtrr(vcpu, msr, data); in kvm_set_msr_common()
2186 return kvm_set_apic_base(vcpu, msr_info); in kvm_set_msr_common()
2188 return kvm_x2apic_msr_write(vcpu, msr, data); in kvm_set_msr_common()
2190 kvm_set_lapic_tscdeadline_msr(vcpu, data); in kvm_set_msr_common()
2193 if (guest_cpuid_has_tsc_adjust(vcpu)) { in kvm_set_msr_common()
2195 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
2196 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
2198 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
2202 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
2206 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
2207 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
2212 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_set_msr_common()
2214 kvmclock_reset(vcpu); in kvm_set_msr_common()
2216 if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) { in kvm_set_msr_common()
2221 &vcpu->requests); in kvm_set_msr_common()
2226 vcpu->arch.time = data; in kvm_set_msr_common()
2227 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_set_msr_common()
2235 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_set_msr_common()
2236 &vcpu->arch.pv_time, data & ~1ULL, in kvm_set_msr_common()
2238 vcpu->arch.pv_time_enabled = false; in kvm_set_msr_common()
2240 vcpu->arch.pv_time_enabled = true; in kvm_set_msr_common()
2245 if (kvm_pv_enable_async_pf(vcpu, data)) in kvm_set_msr_common()
2256 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, in kvm_set_msr_common()
2261 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
2266 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_set_msr_common()
2270 if (kvm_lapic_enable_pv_eoi(vcpu, data)) in kvm_set_msr_common()
2277 return set_msr_mce(vcpu, msr, data); in kvm_set_msr_common()
2291 vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " in kvm_set_msr_common()
2301 vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " in kvm_set_msr_common()
2309 if (kvm_pmu_msr(vcpu, msr)) in kvm_set_msr_common()
2310 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
2313 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " in kvm_set_msr_common()
2329 mutex_lock(&vcpu->kvm->lock); in kvm_set_msr_common()
2330 r = set_msr_hyperv_pw(vcpu, msr, data); in kvm_set_msr_common()
2331 mutex_unlock(&vcpu->kvm->lock); in kvm_set_msr_common()
2334 return set_msr_hyperv(vcpu, msr, data); in kvm_set_msr_common()
2340 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); in kvm_set_msr_common()
2343 if (!guest_cpuid_has_osvw(vcpu)) in kvm_set_msr_common()
2345 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
2348 if (!guest_cpuid_has_osvw(vcpu)) in kvm_set_msr_common()
2350 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
2353 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
2354 return xen_hvm_config(vcpu, data); in kvm_set_msr_common()
2355 if (kvm_pmu_msr(vcpu, msr)) in kvm_set_msr_common()
2356 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
2358 vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", in kvm_set_msr_common()
2362 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", in kvm_set_msr_common()
2377 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) in kvm_get_msr() argument
2379 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); in kvm_get_msr()
2383 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_mtrr() argument
2385 u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; in get_msr_mtrr()
2391 *pdata = vcpu->arch.mtrr_state.def_type + in get_msr_mtrr()
2392 (vcpu->arch.mtrr_state.enabled << 10); in get_msr_mtrr()
2400 *pdata = vcpu->arch.pat; in get_msr_mtrr()
2409 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; in get_msr_mtrr()
2412 (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; in get_msr_mtrr()
2419 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_mce() argument
2422 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
2431 data = vcpu->arch.mcg_cap; in get_msr_mce()
2436 data = vcpu->arch.mcg_ctl; in get_msr_mce()
2439 data = vcpu->arch.mcg_status; in get_msr_mce()
2445 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
2454 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_hyperv_pw() argument
2457 struct kvm *kvm = vcpu->kvm; in get_msr_hyperv_pw()
2475 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in get_msr_hyperv_pw()
2483 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in get_msr_hyperv() argument
2491 kvm_for_each_vcpu(r, v, vcpu->kvm) { in get_msr_hyperv()
2492 if (v == vcpu) { in get_msr_hyperv()
2500 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); in get_msr_hyperv()
2502 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); in get_msr_hyperv()
2504 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); in get_msr_hyperv()
2506 data = vcpu->arch.hv_vapic; in get_msr_hyperv()
2509 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in get_msr_hyperv()
2516 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) in kvm_get_msr_common() argument
2549 if (kvm_pmu_msr(vcpu, msr)) in kvm_get_msr_common()
2550 return kvm_pmu_get_msr(vcpu, msr, pdata); in kvm_get_msr_common()
2560 return get_msr_mtrr(vcpu, msr, pdata); in kvm_get_msr_common()
2579 data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
2582 return kvm_x2apic_msr_read(vcpu, msr, pdata); in kvm_get_msr_common()
2585 data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
2588 data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
2591 data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
2600 data = vcpu->arch.efer; in kvm_get_msr_common()
2604 data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
2608 data = vcpu->arch.time; in kvm_get_msr_common()
2611 data = vcpu->arch.apf.msr_val; in kvm_get_msr_common()
2614 data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
2617 data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
2625 return get_msr_mce(vcpu, msr, pdata); in kvm_get_msr_common()
2641 mutex_lock(&vcpu->kvm->lock); in kvm_get_msr_common()
2642 r = get_msr_hyperv_pw(vcpu, msr, pdata); in kvm_get_msr_common()
2643 mutex_unlock(&vcpu->kvm->lock); in kvm_get_msr_common()
2646 return get_msr_hyperv(vcpu, msr, pdata); in kvm_get_msr_common()
2662 if (!guest_cpuid_has_osvw(vcpu)) in kvm_get_msr_common()
2664 data = vcpu->arch.osvw.length; in kvm_get_msr_common()
2667 if (!guest_cpuid_has_osvw(vcpu)) in kvm_get_msr_common()
2669 data = vcpu->arch.osvw.status; in kvm_get_msr_common()
2672 if (kvm_pmu_msr(vcpu, msr)) in kvm_get_msr_common()
2673 return kvm_pmu_get_msr(vcpu, msr, pdata); in kvm_get_msr_common()
2675 vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); in kvm_get_msr_common()
2678 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); in kvm_get_msr_common()
2693 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, in __msr_io() argument
2695 int (*do_msr)(struct kvm_vcpu *vcpu, in __msr_io() argument
2700 idx = srcu_read_lock(&vcpu->kvm->srcu); in __msr_io()
2702 if (do_msr(vcpu, entries[i].index, &entries[i].data)) in __msr_io()
2704 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __msr_io()
2714 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, in msr_io() argument
2715 int (*do_msr)(struct kvm_vcpu *vcpu, in msr_io() argument
2739 r = n = __msr_io(vcpu, &msrs, entries, do_msr); in msr_io()
2917 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) in need_emulate_wbinvd() argument
2919 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
2922 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
2925 if (need_emulate_wbinvd(vcpu)) { in kvm_arch_vcpu_load()
2927 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
2928 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
2929 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
2933 kvm_x86_ops->vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
2936 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
2937 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
2938 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
2939 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
2942 if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { in kvm_arch_vcpu_load()
2943 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
2944 native_read_tsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
2948 u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, in kvm_arch_vcpu_load()
2949 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
2950 kvm_x86_ops->write_tsc_offset(vcpu, offset); in kvm_arch_vcpu_load()
2951 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
2957 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
2958 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
2959 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
2960 kvm_migrate_timers(vcpu); in kvm_arch_vcpu_load()
2961 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
2964 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_arch_vcpu_load()
2967 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
2969 kvm_x86_ops->vcpu_put(vcpu); in kvm_arch_vcpu_put()
2970 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_put()
2971 vcpu->arch.last_host_tsc = native_read_tsc(); in kvm_arch_vcpu_put()
2974 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_lapic() argument
2977 kvm_x86_ops->sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
2978 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); in kvm_vcpu_ioctl_get_lapic()
2983 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_lapic() argument
2986 kvm_apic_post_state_restore(vcpu, s); in kvm_vcpu_ioctl_set_lapic()
2987 update_cr8_intercept(vcpu); in kvm_vcpu_ioctl_set_lapic()
2992 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
2997 if (irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
3000 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
3001 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
3006 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_nmi() argument
3008 kvm_inject_nmi(vcpu); in kvm_vcpu_ioctl_nmi()
3013 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, in vcpu_ioctl_tpr_access_reporting() argument
3018 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
3022 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_setup_mce() argument
3034 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
3037 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
3040 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
3045 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_mce() argument
3048 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
3050 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
3059 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
3069 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
3070 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { in kvm_vcpu_ioctl_x86_set_mce()
3071 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_mce()
3078 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
3080 kvm_queue_exception(vcpu, MC_VECTOR); in kvm_vcpu_ioctl_x86_set_mce()
3093 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_vcpu_events() argument
3096 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3098 vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
3099 !kvm_exception_is_soft(vcpu->arch.exception.nr); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3100 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3101 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3103 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3106 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3107 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3109 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3111 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3112 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3113 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3123 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events() argument
3131 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3132 vcpu->arch.exception.pending = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3133 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3134 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3135 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3137 vcpu->arch.interrupt.pending = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3138 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3139 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3141 kvm_x86_ops->set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
3144 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3146 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3147 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3150 kvm_vcpu_has_lapic(vcpu)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
3151 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3153 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3158 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_debugregs() argument
3163 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
3164 kvm_get_dr(vcpu, 6, &val); in kvm_vcpu_ioctl_x86_get_debugregs()
3166 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
3171 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_debugregs() argument
3182 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
3183 kvm_update_dr0123(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
3184 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
3185 kvm_update_dr6(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
3186 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
3187 kvm_update_dr7(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
3194 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) in fill_xsave() argument
3196 struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; in fill_xsave()
3230 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) in load_xsave() argument
3232 struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; in load_xsave()
3269 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave() argument
3274 fill_xsave((u8 *) guest_xsave->region, vcpu); in kvm_vcpu_ioctl_x86_get_xsave()
3277 &vcpu->arch.guest_fpu.state->fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
3284 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xsave() argument
3298 load_xsave(vcpu, (u8 *)guest_xsave->region); in kvm_vcpu_ioctl_x86_set_xsave()
3302 memcpy(&vcpu->arch.guest_fpu.state->fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
3308 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xcrs() argument
3319 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
3322 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xcrs() argument
3336 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, in kvm_vcpu_ioctl_x86_set_xcrs()
3351 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) in kvm_set_guest_paused() argument
3353 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
3355 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
3356 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_guest_paused()
3363 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
3377 if (!vcpu->arch.apic) in kvm_arch_vcpu_ioctl()
3384 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
3395 if (!vcpu->arch.apic) in kvm_arch_vcpu_ioctl()
3401 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
3410 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_ioctl()
3414 r = kvm_vcpu_ioctl_nmi(vcpu); in kvm_arch_vcpu_ioctl()
3424 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
3434 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
3445 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
3456 r = msr_io(vcpu, argp, kvm_get_msr, 1); in kvm_arch_vcpu_ioctl()
3459 r = msr_io(vcpu, argp, do_set_msr, 0); in kvm_arch_vcpu_ioctl()
3467 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); in kvm_arch_vcpu_ioctl()
3480 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arch_vcpu_ioctl()
3485 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); in kvm_arch_vcpu_ioctl()
3494 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); in kvm_arch_vcpu_ioctl()
3503 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); in kvm_arch_vcpu_ioctl()
3509 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
3524 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
3530 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
3547 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
3556 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
3569 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
3578 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
3592 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
3607 kvm_set_tsc_khz(vcpu, user_tsc_khz); in kvm_arch_vcpu_ioctl()
3613 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
3617 r = kvm_set_guest_paused(vcpu); in kvm_arch_vcpu_ioctl()
3628 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
4143 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, in vcpu_mmio_write() argument
4151 if (!(vcpu->arch.apic && in vcpu_mmio_write()
4152 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
4153 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_write()
4164 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) in vcpu_mmio_read() argument
4171 if (!(vcpu->arch.apic && in vcpu_mmio_read()
4172 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
4174 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_read()
4186 static void kvm_set_segment(struct kvm_vcpu *vcpu, in kvm_set_segment() argument
4189 kvm_x86_ops->set_segment(vcpu, var, seg); in kvm_set_segment()
4192 void kvm_get_segment(struct kvm_vcpu *vcpu, in kvm_get_segment() argument
4195 kvm_x86_ops->get_segment(vcpu, var, seg); in kvm_get_segment()
4198 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, in translate_nested_gpa() argument
4203 BUG_ON(!mmu_is_nested(vcpu)); in translate_nested_gpa()
4207 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
4212 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_read() argument
4215 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
4216 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
4219 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_fetch() argument
4222 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
4224 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
4227 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_write() argument
4230 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
4232 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
4236 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_system() argument
4239 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
4243 struct kvm_vcpu *vcpu, u32 access, in kvm_read_guest_virt_helper() argument
4250 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
4258 ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data, in kvm_read_guest_virt_helper()
4278 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt() local
4279 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
4284 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
4292 ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val, in kvm_fetch_guest_virt()
4304 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_virt() local
4305 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
4307 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, in kvm_read_guest_virt()
4316 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_virt_system() local
4317 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); in kvm_read_guest_virt_system()
4325 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_write_guest_virt_system() local
4330 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_system()
4339 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); in kvm_write_guest_virt_system()
4354 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_mmio_gva_to_gpa() argument
4358 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
4361 if (vcpu_match_mmio_gva(vcpu, gva) in vcpu_mmio_gva_to_gpa()
4362 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
4363 vcpu->arch.access, access)) { in vcpu_mmio_gva_to_gpa()
4364 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
4370 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
4379 if (vcpu_match_mmio_gpa(vcpu, *gpa)) { in vcpu_mmio_gva_to_gpa()
4387 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, in emulator_write_phys() argument
4392 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); in emulator_write_phys()
4395 kvm_mmu_pte_write(vcpu, gpa, val, bytes); in emulator_write_phys()
4400 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
4402 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
4404 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4406 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
4411 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) in read_prepare() argument
4413 if (vcpu->mmio_read_completed) { in read_prepare()
4415 vcpu->mmio_fragments[0].gpa, *(u64 *)val); in read_prepare()
4416 vcpu->mmio_read_completed = 0; in read_prepare()
4423 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in read_emulate() argument
4426 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); in read_emulate()
4429 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in write_emulate() argument
4432 return emulator_write_phys(vcpu, gpa, val, bytes); in write_emulate()
4435 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) in write_mmio() argument
4438 return vcpu_mmio_write(vcpu, gpa, bytes, val); in write_mmio()
4441 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in read_exit_mmio() argument
4448 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in write_exit_mmio() argument
4451 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
4453 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
4474 struct kvm_vcpu *vcpu, in emulator_read_write_onepage() argument
4482 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); in emulator_read_write_onepage()
4491 if (ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
4498 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
4506 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
4507 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
4520 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_write() local
4525 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
4528 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
4536 vcpu, ops); in emulator_read_write()
4548 vcpu, ops); in emulator_read_write()
4552 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
4555 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
4557 vcpu->mmio_needed = 1; in emulator_read_write()
4558 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
4560 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
4561 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
4562 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
4563 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
4565 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
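
The emulator_read_write()/emulator_read_write_onepage() hits above (source lines 4474-4565) show the fallback chain for an emulated memory access: try ordinary guest memory (read_write_emulate), then the in-kernel MMIO bus (read_write_mmio), and only then queue the remainder as MMIO fragments of at most 8 bytes and exit to userspace with KVM_EXIT_MMIO, describing the first fragment in vcpu->run. Below is a simplified model of that last step; the types are stand-ins for struct kvm_run and the fragment array.

    #include <stdint.h>
    #include <string.h>

    #define MAX_MMIO_FRAGMENTS 2        /* a cross-page access needs at most two */

    struct mmio_fragment { uint64_t gpa; void *data; unsigned len; };

    struct run_model {                  /* stand-in for the shared struct kvm_run */
        int      exit_is_mmio;          /* KVM_EXIT_MMIO in the real structure */
        uint64_t phys_addr;
        unsigned len;
        int      is_write;
        uint8_t  data[8];
    };

    struct mmio_state {
        struct mmio_fragment frags[MAX_MMIO_FRAGMENTS];
        unsigned nr_frags, cur_frag;
    };

    /* Hand the first pending fragment to userspace; the rest follow on re-entry. */
    static void start_mmio_exit(struct mmio_state *s, struct run_model *run, int is_write)
    {
        struct mmio_fragment *frag = &s->frags[0];
        unsigned len = frag->len < 8 ? frag->len : 8;   /* min(8u, frag->len) */

        s->cur_frag    = 0;
        run->exit_is_mmio = 1;
        run->phys_addr = frag->gpa;
        run->len       = len;
        run->is_write  = is_write;
        if (is_write)
            memcpy(run->data, frag->data, len);         /* write_exit_mmio() */
    }
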
4605 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_cmpxchg_emulated() local
4615 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); in emulator_cmpxchg_emulated()
4624 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); in emulator_cmpxchg_emulated()
4652 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); in emulator_cmpxchg_emulated()
4653 kvm_mmu_pte_write(vcpu, gpa, new, bytes); in emulator_cmpxchg_emulated()
4663 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) in kernel_pio() argument
4668 if (vcpu->arch.pio.in) in kernel_pio()
4669 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
4670 vcpu->arch.pio.size, pd); in kernel_pio()
4672 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, in kernel_pio()
4673 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
4678 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_in_out() argument
4682 vcpu->arch.pio.port = port; in emulator_pio_in_out()
4683 vcpu->arch.pio.in = in; in emulator_pio_in_out()
4684 vcpu->arch.pio.count = count; in emulator_pio_in_out()
4685 vcpu->arch.pio.size = size; in emulator_pio_in_out()
4687 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
4688 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
4692 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
4693 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
4694 vcpu->run->io.size = size; in emulator_pio_in_out()
4695 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
4696 vcpu->run->io.count = count; in emulator_pio_in_out()
4697 vcpu->run->io.port = port; in emulator_pio_in_out()
4706 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_pio_in_emulated() local
4709 if (vcpu->arch.pio.count) in emulator_pio_in_emulated()
4712 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); in emulator_pio_in_emulated()
4715 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in_emulated()
4716 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in_emulated()
4717 vcpu->arch.pio.count = 0; in emulator_pio_in_emulated()
4728 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_pio_out_emulated() local
4730 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out_emulated()
4731 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out_emulated()
4732 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); in emulator_pio_out_emulated()
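
emulator_pio_in_out() above (source lines 4678-4732) records the port, direction, size and count, copies OUT data into the shared pio page, and first offers the access to the in-kernel KVM_PIO_BUS via kernel_pio(); only when no in-kernel device claims it does the vCPU exit to userspace with KVM_EXIT_IO, with the data area advertised at KVM_PIO_PAGE_OFFSET. A condensed decision sketch, where the bus callback is a stand-in for kernel_pio():

    #include <stdint.h>
    #include <stdbool.h>

    struct pio_request { uint16_t port; bool in; unsigned size, count; };

    /* stand-in for kernel_pio(): true if an in-kernel device handled the port */
    typedef bool (*pio_bus_fn)(const struct pio_request *req, void *data);

    /*
     * Returns true when emulation finished inside the kernel, false when the
     * caller must exit to userspace with KVM_EXIT_IO.
     */
    static bool emulate_pio(struct pio_request *req, void *pio_data, pio_bus_fn bus)
    {
        if (bus(req, pio_data)) {
            req->count = 0;         /* nothing left pending */
            return true;
        }
        /*
         * Userspace completes it: direction, size, count and port are
         * published in vcpu->run, the data sits at KVM_PIO_PAGE_OFFSET.
         */
        return false;
    }
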
4735 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) in get_segment_base() argument
4737 return kvm_x86_ops->get_segment_base(vcpu, seg); in get_segment_base()
4745 int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd_noskip() argument
4747 if (!need_emulate_wbinvd(vcpu)) in kvm_emulate_wbinvd_noskip()
4753 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
4754 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
4757 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
4763 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd() argument
4765 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_wbinvd()
4766 return kvm_emulate_wbinvd_noskip(vcpu); in kvm_emulate_wbinvd()
4797 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_cr() local
4802 value = kvm_read_cr0(vcpu); in emulator_get_cr()
4805 value = vcpu->arch.cr2; in emulator_get_cr()
4808 value = kvm_read_cr3(vcpu); in emulator_get_cr()
4811 value = kvm_read_cr4(vcpu); in emulator_get_cr()
4814 value = kvm_get_cr8(vcpu); in emulator_get_cr()
4826 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_cr() local
4831 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); in emulator_set_cr()
4834 vcpu->arch.cr2 = val; in emulator_set_cr()
4837 res = kvm_set_cr3(vcpu, val); in emulator_set_cr()
4840 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); in emulator_set_cr()
4843 res = kvm_set_cr8(vcpu, val); in emulator_set_cr()
4922 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_segment() local
4944 kvm_set_segment(vcpu, &var, seg); in emulator_set_segment()
5064 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) in toggle_interruptibility() argument
5066 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in toggle_interruptibility()
5077 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
5079 kvm_make_request(KVM_REQ_EVENT, vcpu); in toggle_interruptibility()
5083 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) in inject_emulated_exception() argument
5085 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in inject_emulated_exception()
5087 return kvm_propagate_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
5090 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
5093 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
5097 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) in init_emulate_ctxt() argument
5099 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
5102 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
5104 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
5105 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
5106 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
5108 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : in init_emulate_ctxt()
5111 ctxt->guest_mode = is_guest_mode(vcpu); in init_emulate_ctxt()
5114 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
5117 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) in kvm_inject_realmode_interrupt() argument
5119 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
5122 init_emulate_ctxt(vcpu); in kvm_inject_realmode_interrupt()
5133 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
5134 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
5137 vcpu->arch.nmi_pending = 0; in kvm_inject_realmode_interrupt()
5139 vcpu->arch.interrupt.pending = false; in kvm_inject_realmode_interrupt()
5145 static int handle_emulation_failure(struct kvm_vcpu *vcpu) in handle_emulation_failure() argument
5149 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
5150 trace_kvm_emulate_insn_failed(vcpu); in handle_emulation_failure()
5151 if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { in handle_emulation_failure()
5152 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
5153 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
5154 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
5157 kvm_queue_exception(vcpu, UD_VECTOR); in handle_emulation_failure()
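The lines above show handle_emulation_failure() filling vcpu->run with KVM_EXIT_INTERNAL_ERROR and suberror KVM_INTERNAL_ERROR_EMULATION before returning to userspace. A minimal userspace sketch of consuming that exit follows; it is not code from x86.c, and it assumes 'run' is a struct kvm_run already mmap'ed from a vcpu fd.

/* Hedged sketch: report an emulation-failure exit in a userspace VMM.
 * 'run' is the mmap'ed struct kvm_run of the vcpu that just returned
 * from KVM_RUN. */
#include <stdio.h>
#include <linux/kvm.h>

static void report_internal_error(const struct kvm_run *run)
{
        if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR)
                return;
        fprintf(stderr, "KVM internal error, suberror %u\n",
                run->internal.suberror);
        if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION)
                fprintf(stderr, "instruction emulation failed\n");
        for (unsigned int i = 0; i < run->internal.ndata; i++)
                fprintf(stderr, "  data[%u] = 0x%llx\n", i,
                        (unsigned long long)run->internal.data[i]);
}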
5162 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, in reexecute_instruction() argument
5172 if (!vcpu->arch.mmu.direct_map) { in reexecute_instruction()
5177 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); in reexecute_instruction()
5193 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5205 if (vcpu->arch.mmu.direct_map) { in reexecute_instruction()
5208 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
5209 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
5210 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
5213 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5223 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
5236 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in retry_instruction() local
5239 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
5240 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
5255 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
5266 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
5267 vcpu->arch.last_retry_addr = cr2; in retry_instruction()
5269 if (!vcpu->arch.mmu.direct_map) in retry_instruction()
5270 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); in retry_instruction()
5272 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
5277 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
5278 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
5295 static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) in kvm_vcpu_check_singlestep() argument
5297 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_singlestep()
5308 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_check_singlestep()
5311 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; in kvm_vcpu_check_singlestep()
5316 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; in kvm_vcpu_check_singlestep()
5322 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_singlestep()
5323 vcpu->arch.dr6 |= DR6_BS | DR6_RTM; in kvm_vcpu_check_singlestep()
5324 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_vcpu_check_singlestep()
5329 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) in kvm_vcpu_check_breakpoint() argument
5331 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_breakpoint()
5332 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_breakpoint()
5333 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_breakpoint()
5334 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_breakpoint()
5336 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_breakpoint()
5337 vcpu->arch.eff_db); in kvm_vcpu_check_breakpoint()
5349 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_breakpoint()
5350 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { in kvm_vcpu_check_breakpoint()
5351 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_breakpoint()
5353 vcpu->arch.dr7, in kvm_vcpu_check_breakpoint()
5354 vcpu->arch.db); in kvm_vcpu_check_breakpoint()
5357 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_breakpoint()
5358 vcpu->arch.dr6 |= dr6 | DR6_RTM; in kvm_vcpu_check_breakpoint()
5359 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_vcpu_check_breakpoint()
5368 int x86_emulate_instruction(struct kvm_vcpu *vcpu, in x86_emulate_instruction() argument
5375 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
5377 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
5383 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
5384 kvm_clear_exception_queue(vcpu); in x86_emulate_instruction()
5387 init_emulate_ctxt(vcpu); in x86_emulate_instruction()
5395 if (kvm_vcpu_check_breakpoint(vcpu, &r)) in x86_emulate_instruction()
5407 trace_kvm_emulate_insn_start(vcpu); in x86_emulate_instruction()
5408 ++vcpu->stat.insn_emulation; in x86_emulate_instruction()
5412 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, in x86_emulate_instruction()
5417 return handle_emulation_failure(vcpu); in x86_emulate_instruction()
5422 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
5424 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
5433 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
5434 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
5445 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, in x86_emulate_instruction()
5449 return handle_emulation_failure(vcpu); in x86_emulate_instruction()
5454 if (inject_emulated_exception(vcpu)) in x86_emulate_instruction()
5456 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
5457 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
5459 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
5462 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
5465 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
5466 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
5469 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
5476 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in x86_emulate_instruction()
5477 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
5478 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
5479 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
5481 kvm_vcpu_check_singlestep(vcpu, rflags, &r); in x86_emulate_instruction()
5484 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
5493 kvm_make_request(KVM_REQ_EVENT, vcpu); in x86_emulate_instruction()
5495 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
5501 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) in kvm_fast_pio_out() argument
5503 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_fast_pio_out()
5504 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, in kvm_fast_pio_out()
5507 vcpu->arch.pio.count = 0; in kvm_fast_pio_out()
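kvm_fast_pio_out() above pushes a single OUT through the emulated PIO path; when the port is not handled inside the kernel, the vcpu returns to userspace with KVM_EXIT_IO and the bytes packed into the pio_data page seen earlier in the listing. Below is a hedged userspace sketch of draining that exit, not kernel code; the kvm_run mapping is assumed to exist already.

/* Hedged sketch: consume a KVM_EXIT_IO produced by an OUT instruction.
 * For OUT, count * size data bytes are laid out consecutively starting
 * at io.data_offset inside the same kvm_run mapping. */
#include <stdio.h>
#include <linux/kvm.h>

static void handle_pio_out(struct kvm_run *run)
{
        if (run->exit_reason != KVM_EXIT_IO ||
            run->io.direction != KVM_EXIT_IO_OUT)
                return;
        const unsigned char *data = (const unsigned char *)run +
                                    run->io.data_offset;
        for (unsigned int i = 0; i < run->io.count * run->io.size; i++)
                printf("port 0x%x: byte %u = 0x%02x\n",
                       run->io.port, i, data[i]);
}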
5536 struct kvm_vcpu *vcpu; in kvmclock_cpufreq_notifier() local
5587 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_cpufreq_notifier()
5588 if (vcpu->cpu != freq->cpu) in kvmclock_cpufreq_notifier()
5590 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_cpufreq_notifier()
5591 if (vcpu->cpu != smp_processor_id()) in kvmclock_cpufreq_notifier()
5703 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) in kvm_before_handle_nmi() argument
5705 __this_cpu_write(current_vcpu, vcpu); in kvm_before_handle_nmi()
5709 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) in kvm_after_handle_nmi() argument
5750 struct kvm_vcpu *vcpu; in pvclock_gtod_update_fn() local
5755 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
5756 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in pvclock_gtod_update_fn()
5865 int kvm_vcpu_halt(struct kvm_vcpu *vcpu) in kvm_vcpu_halt() argument
5867 ++vcpu->stat.halt_exits; in kvm_vcpu_halt()
5868 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_halt()
5869 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
5872 vcpu->run->exit_reason = KVM_EXIT_HLT; in kvm_vcpu_halt()
5878 int kvm_emulate_halt(struct kvm_vcpu *vcpu) in kvm_emulate_halt() argument
5880 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_halt()
5881 return kvm_vcpu_halt(vcpu); in kvm_emulate_halt()
5885 int kvm_hv_hypercall(struct kvm_vcpu *vcpu) in kvm_hv_hypercall() argument
5895 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { in kvm_hv_hypercall()
5896 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_hv_hypercall()
5900 longmode = is_64_bit_mode(vcpu); in kvm_hv_hypercall()
5903 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | in kvm_hv_hypercall()
5904 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); in kvm_hv_hypercall()
5905 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | in kvm_hv_hypercall()
5906 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); in kvm_hv_hypercall()
5907 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | in kvm_hv_hypercall()
5908 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); in kvm_hv_hypercall()
5912 param = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_hv_hypercall()
5913 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); in kvm_hv_hypercall()
5914 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); in kvm_hv_hypercall()
5927 kvm_vcpu_on_spin(vcpu); in kvm_hv_hypercall()
5936 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); in kvm_hv_hypercall()
5938 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); in kvm_hv_hypercall()
5939 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); in kvm_hv_hypercall()
5962 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) in kvm_emulate_hypercall() argument
5967 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_emulate_hypercall()
5969 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
5970 return kvm_hv_hypercall(vcpu); in kvm_emulate_hypercall()
5972 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_emulate_hypercall()
5973 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); in kvm_emulate_hypercall()
5974 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_emulate_hypercall()
5975 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); in kvm_emulate_hypercall()
5976 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); in kvm_emulate_hypercall()
5980 op_64_bit = is_64_bit_mode(vcpu); in kvm_emulate_hypercall()
5989 if (kvm_x86_ops->get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
5999 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
6009 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); in kvm_emulate_hypercall()
6010 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
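The register reads above (nr in RAX, arguments in RBX/RCX/RDX/RSI, result written back to RAX) are the host half of the KVM paravirtual hypercall ABI. Below is a hedged guest-side sketch of the same convention; it hard-codes "vmcall", so it assumes a VMX guest (real guests select vmcall/vmmcall at runtime), and it only mirrors the pattern of asm/kvm_para.h rather than quoting it.

/* Hedged guest-side sketch of a two-argument KVM hypercall, matching the
 * register convention the host code above reads: RAX = nr, RBX/RCX = args,
 * RAX = return value. */
static inline long kvm_hypercall2_sketch(unsigned long nr,
                                         unsigned long p1, unsigned long p2)
{
        long ret;

        asm volatile("vmcall"
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2)
                     : "memory");
        return ret;
}

/* For example, the kick dispatched in the listing would be issued as
 * kvm_hypercall2_sketch(KVM_HC_KICK_CPU, flags, apicid). */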
6017 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_fix_hypercall() local
6019 unsigned long rip = kvm_rip_read(vcpu); in emulator_fix_hypercall()
6021 kvm_x86_ops->patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
6032 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) in dm_request_for_irq_injection() argument
6034 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) && in dm_request_for_irq_injection()
6035 vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
6036 kvm_arch_interrupt_allowed(vcpu)); in dm_request_for_irq_injection()
6039 static void post_kvm_run_save(struct kvm_vcpu *vcpu) in post_kvm_run_save() argument
6041 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
6043 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; in post_kvm_run_save()
6044 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
6045 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
6046 if (irqchip_in_kernel(vcpu->kvm)) in post_kvm_run_save()
6050 kvm_arch_interrupt_allowed(vcpu) && in post_kvm_run_save()
6051 !kvm_cpu_has_interrupt(vcpu) && in post_kvm_run_save()
6052 !kvm_event_needs_reinjection(vcpu); in post_kvm_run_save()
6055 static void update_cr8_intercept(struct kvm_vcpu *vcpu) in update_cr8_intercept() argument
6062 if (!vcpu->arch.apic) in update_cr8_intercept()
6065 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
6066 max_irr = kvm_lapic_find_highest_irr(vcpu); in update_cr8_intercept()
6073 tpr = kvm_lapic_get_cr8(vcpu); in update_cr8_intercept()
6075 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
6078 static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) in inject_pending_event() argument
6083 if (vcpu->arch.exception.pending) { in inject_pending_event()
6084 trace_kvm_inj_exception(vcpu->arch.exception.nr, in inject_pending_event()
6085 vcpu->arch.exception.has_error_code, in inject_pending_event()
6086 vcpu->arch.exception.error_code); in inject_pending_event()
6088 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
6089 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | in inject_pending_event()
6092 if (vcpu->arch.exception.nr == DB_VECTOR && in inject_pending_event()
6093 (vcpu->arch.dr7 & DR7_GD)) { in inject_pending_event()
6094 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
6095 kvm_update_dr7(vcpu); in inject_pending_event()
6098 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, in inject_pending_event()
6099 vcpu->arch.exception.has_error_code, in inject_pending_event()
6100 vcpu->arch.exception.error_code, in inject_pending_event()
6101 vcpu->arch.exception.reinject); in inject_pending_event()
6105 if (vcpu->arch.nmi_injected) { in inject_pending_event()
6106 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
6110 if (vcpu->arch.interrupt.pending) { in inject_pending_event()
6111 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
6115 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
6116 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
6122 if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { in inject_pending_event()
6123 --vcpu->arch.nmi_pending; in inject_pending_event()
6124 vcpu->arch.nmi_injected = true; in inject_pending_event()
6125 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
6126 } else if (kvm_cpu_has_injectable_intr(vcpu)) { in inject_pending_event()
6134 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
6135 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); in inject_pending_event()
6139 if (kvm_x86_ops->interrupt_allowed(vcpu)) { in inject_pending_event()
6140 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), in inject_pending_event()
6142 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
6148 static void process_nmi(struct kvm_vcpu *vcpu) in process_nmi() argument
6157 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
6160 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
6161 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
6162 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_nmi()
6165 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) in vcpu_scan_ioapic() argument
6170 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_scan_ioapic()
6176 kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); in vcpu_scan_ioapic()
6177 kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); in vcpu_scan_ioapic()
6178 kvm_apic_update_tmr(vcpu, tmr); in vcpu_scan_ioapic()
6181 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb() argument
6183 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb()
6184 kvm_x86_ops->tlb_flush(vcpu); in kvm_vcpu_flush_tlb()
6187 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) in kvm_vcpu_reload_apic_access_page() argument
6191 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_reload_apic_access_page()
6197 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); in kvm_vcpu_reload_apic_access_page()
6200 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); in kvm_vcpu_reload_apic_access_page()
6226 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) in vcpu_enter_guest() argument
6229 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && in vcpu_enter_guest()
6230 vcpu->run->request_interrupt_window; in vcpu_enter_guest()
6233 if (vcpu->requests) { in vcpu_enter_guest()
6234 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) in vcpu_enter_guest()
6235 kvm_mmu_unload(vcpu); in vcpu_enter_guest()
6236 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) in vcpu_enter_guest()
6237 __kvm_migrate_timers(vcpu); in vcpu_enter_guest()
6238 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
6239 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
6240 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
6241 kvm_gen_kvmclock_update(vcpu); in vcpu_enter_guest()
6242 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { in vcpu_enter_guest()
6243 r = kvm_guest_time_update(vcpu); in vcpu_enter_guest()
6247 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) in vcpu_enter_guest()
6248 kvm_mmu_sync_roots(vcpu); in vcpu_enter_guest()
6249 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in vcpu_enter_guest()
6250 kvm_vcpu_flush_tlb(vcpu); in vcpu_enter_guest()
6251 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { in vcpu_enter_guest()
6252 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
6256 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
6257 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
6261 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { in vcpu_enter_guest()
6262 vcpu->fpu_active = 0; in vcpu_enter_guest()
6263 kvm_x86_ops->fpu_deactivate(vcpu); in vcpu_enter_guest()
6265 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { in vcpu_enter_guest()
6267 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
6271 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in vcpu_enter_guest()
6272 record_steal_time(vcpu); in vcpu_enter_guest()
6273 if (kvm_check_request(KVM_REQ_NMI, vcpu)) in vcpu_enter_guest()
6274 process_nmi(vcpu); in vcpu_enter_guest()
6275 if (kvm_check_request(KVM_REQ_PMU, vcpu)) in vcpu_enter_guest()
6276 kvm_handle_pmu_event(vcpu); in vcpu_enter_guest()
6277 if (kvm_check_request(KVM_REQ_PMI, vcpu)) in vcpu_enter_guest()
6278 kvm_deliver_pmi(vcpu); in vcpu_enter_guest()
6279 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) in vcpu_enter_guest()
6280 vcpu_scan_ioapic(vcpu); in vcpu_enter_guest()
6281 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) in vcpu_enter_guest()
6282 kvm_vcpu_reload_apic_access_page(vcpu); in vcpu_enter_guest()
6285 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { in vcpu_enter_guest()
6286 kvm_apic_accept_events(vcpu); in vcpu_enter_guest()
6287 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
6292 if (inject_pending_event(vcpu, req_int_win) != 0) in vcpu_enter_guest()
6296 if (vcpu->arch.nmi_pending) in vcpu_enter_guest()
6297 kvm_x86_ops->enable_nmi_window(vcpu); in vcpu_enter_guest()
6298 if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) in vcpu_enter_guest()
6299 kvm_x86_ops->enable_irq_window(vcpu); in vcpu_enter_guest()
6302 if (kvm_lapic_enabled(vcpu)) { in vcpu_enter_guest()
6308 kvm_x86_ops->hwapic_irr_update(vcpu, in vcpu_enter_guest()
6309 kvm_lapic_find_highest_irr(vcpu)); in vcpu_enter_guest()
6310 update_cr8_intercept(vcpu); in vcpu_enter_guest()
6311 kvm_lapic_sync_to_vapic(vcpu); in vcpu_enter_guest()
6315 r = kvm_mmu_reload(vcpu); in vcpu_enter_guest()
6322 kvm_x86_ops->prepare_guest_switch(vcpu); in vcpu_enter_guest()
6323 if (vcpu->fpu_active) in vcpu_enter_guest()
6324 kvm_load_guest_fpu(vcpu); in vcpu_enter_guest()
6325 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
6327 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
6336 if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests in vcpu_enter_guest()
6338 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
6342 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
6347 kvm_load_guest_xcr0(vcpu); in vcpu_enter_guest()
6350 smp_send_reschedule(vcpu->cpu); in vcpu_enter_guest()
6354 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
6356 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
6357 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
6358 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
6359 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
6360 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
6361 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
6364 trace_kvm_entry(vcpu->vcpu_id); in vcpu_enter_guest()
6365 wait_lapic_expire(vcpu); in vcpu_enter_guest()
6366 kvm_x86_ops->run(vcpu); in vcpu_enter_guest()
6374 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
6375 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
6376 kvm_x86_ops->sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
6377 kvm_update_dr0123(vcpu); in vcpu_enter_guest()
6378 kvm_update_dr6(vcpu); in vcpu_enter_guest()
6379 kvm_update_dr7(vcpu); in vcpu_enter_guest()
6380 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
6393 vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, in vcpu_enter_guest()
6396 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
6399 kvm_put_guest_xcr0(vcpu); in vcpu_enter_guest()
6402 kvm_x86_ops->handle_external_intr(vcpu); in vcpu_enter_guest()
6404 ++vcpu->stat.exits; in vcpu_enter_guest()
6418 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
6424 unsigned long rip = kvm_rip_read(vcpu); in vcpu_enter_guest()
6428 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
6429 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in vcpu_enter_guest()
6431 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
6432 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
6434 r = kvm_x86_ops->handle_exit(vcpu); in vcpu_enter_guest()
6438 kvm_x86_ops->cancel_injection(vcpu); in vcpu_enter_guest()
6439 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
6440 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
6445 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) in vcpu_block() argument
6447 if (!kvm_arch_vcpu_runnable(vcpu)) { in vcpu_block()
6448 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
6449 kvm_vcpu_block(vcpu); in vcpu_block()
6450 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
6451 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) in vcpu_block()
6455 kvm_apic_accept_events(vcpu); in vcpu_block()
6456 switch(vcpu->arch.mp_state) { in vcpu_block()
6458 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
6459 vcpu->arch.mp_state = in vcpu_block()
6462 vcpu->arch.apf.halted = false; in vcpu_block()
6473 static int vcpu_run(struct kvm_vcpu *vcpu) in vcpu_run() argument
6476 struct kvm *kvm = vcpu->kvm; in vcpu_run()
6478 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
6481 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in vcpu_run()
6482 !vcpu->arch.apf.halted) in vcpu_run()
6483 r = vcpu_enter_guest(vcpu); in vcpu_run()
6485 r = vcpu_block(kvm, vcpu); in vcpu_run()
6489 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); in vcpu_run()
6490 if (kvm_cpu_has_pending_timer(vcpu)) in vcpu_run()
6491 kvm_inject_pending_timer_irqs(vcpu); in vcpu_run()
6493 if (dm_request_for_irq_injection(vcpu)) { in vcpu_run()
6495 vcpu->run->exit_reason = KVM_EXIT_INTR; in vcpu_run()
6496 ++vcpu->stat.request_irq_exits; in vcpu_run()
6500 kvm_check_async_pf_completion(vcpu); in vcpu_run()
6504 vcpu->run->exit_reason = KVM_EXIT_INTR; in vcpu_run()
6505 ++vcpu->stat.signal_exits; in vcpu_run()
6509 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
6511 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
6515 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
6520 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) in complete_emulated_io() argument
6523 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
6524 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); in complete_emulated_io()
6525 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
6531 static int complete_emulated_pio(struct kvm_vcpu *vcpu) in complete_emulated_pio() argument
6533 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
6535 return complete_emulated_io(vcpu); in complete_emulated_pio()
6556 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) in complete_emulated_mmio() argument
6558 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
6562 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
6565 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
6567 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
6573 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
6581 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
6582 vcpu->mmio_needed = 0; in complete_emulated_mmio()
6585 if (vcpu->mmio_is_write) in complete_emulated_mmio()
6587 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
6588 return complete_emulated_io(vcpu); in complete_emulated_mmio()
6593 if (vcpu->mmio_is_write) in complete_emulated_mmio()
6596 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
6597 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
6602 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in kvm_arch_vcpu_ioctl_run() argument
6610 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
6611 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); in kvm_arch_vcpu_ioctl_run()
6613 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
6614 kvm_vcpu_block(vcpu); in kvm_arch_vcpu_ioctl_run()
6615 kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_run()
6616 clear_bit(KVM_REQ_UNHALT, &vcpu->requests); in kvm_arch_vcpu_ioctl_run()
6622 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
6623 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
6629 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
6630 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
6631 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
6632 r = cui(vcpu); in kvm_arch_vcpu_ioctl_run()
6636 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
6638 r = vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
6641 post_kvm_run_save(vcpu); in kvm_arch_vcpu_ioctl_run()
6642 if (vcpu->sigset_active) in kvm_arch_vcpu_ioctl_run()
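kvm_arch_vcpu_ioctl_run() above is the kernel half of the KVM_RUN ioctl. For orientation, here is a hedged userspace sketch of the matching run loop; guest memory setup (KVM_SET_USER_MEMORY_REGION), register initialization and error handling are elided, so this will not boot anything as written.

/* Hedged sketch: the userspace side of KVM_RUN.  Only the ioctl plumbing
 * around vcpu_run() is shown. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int run_loop_sketch(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
        size_t sz = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu, 0);

        for (;;) {
                if (ioctl(vcpu, KVM_RUN, 0) < 0)
                        return -1;                  /* -EINTR etc. */
                switch (run->exit_reason) {
                case KVM_EXIT_HLT:                  /* see kvm_vcpu_halt() above */
                        return 0;
                case KVM_EXIT_IO:
                case KVM_EXIT_MMIO:
                        /* emulate the access, then loop back into KVM_RUN;
                         * complete_userspace_io finishes the instruction */
                        break;
                default:
                        fprintf(stderr, "unhandled exit %u\n",
                                run->exit_reason);
                        return -1;
                }
        }
}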
6648 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
6650 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in kvm_arch_vcpu_ioctl_get_regs()
6658 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_ioctl_get_regs()
6659 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in kvm_arch_vcpu_ioctl_get_regs()
6661 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_arch_vcpu_ioctl_get_regs()
6662 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); in kvm_arch_vcpu_ioctl_get_regs()
6663 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_arch_vcpu_ioctl_get_regs()
6664 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); in kvm_arch_vcpu_ioctl_get_regs()
6665 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); in kvm_arch_vcpu_ioctl_get_regs()
6666 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); in kvm_arch_vcpu_ioctl_get_regs()
6667 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); in kvm_arch_vcpu_ioctl_get_regs()
6668 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); in kvm_arch_vcpu_ioctl_get_regs()
6670 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); in kvm_arch_vcpu_ioctl_get_regs()
6671 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); in kvm_arch_vcpu_ioctl_get_regs()
6672 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); in kvm_arch_vcpu_ioctl_get_regs()
6673 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); in kvm_arch_vcpu_ioctl_get_regs()
6674 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); in kvm_arch_vcpu_ioctl_get_regs()
6675 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); in kvm_arch_vcpu_ioctl_get_regs()
6676 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); in kvm_arch_vcpu_ioctl_get_regs()
6677 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); in kvm_arch_vcpu_ioctl_get_regs()
6680 regs->rip = kvm_rip_read(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
6681 regs->rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
6686 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
6688 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in kvm_arch_vcpu_ioctl_set_regs()
6689 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in kvm_arch_vcpu_ioctl_set_regs()
6691 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); in kvm_arch_vcpu_ioctl_set_regs()
6692 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); in kvm_arch_vcpu_ioctl_set_regs()
6693 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); in kvm_arch_vcpu_ioctl_set_regs()
6694 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); in kvm_arch_vcpu_ioctl_set_regs()
6695 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); in kvm_arch_vcpu_ioctl_set_regs()
6696 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); in kvm_arch_vcpu_ioctl_set_regs()
6697 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); in kvm_arch_vcpu_ioctl_set_regs()
6698 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); in kvm_arch_vcpu_ioctl_set_regs()
6700 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); in kvm_arch_vcpu_ioctl_set_regs()
6701 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); in kvm_arch_vcpu_ioctl_set_regs()
6702 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); in kvm_arch_vcpu_ioctl_set_regs()
6703 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); in kvm_arch_vcpu_ioctl_set_regs()
6704 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); in kvm_arch_vcpu_ioctl_set_regs()
6705 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); in kvm_arch_vcpu_ioctl_set_regs()
6706 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); in kvm_arch_vcpu_ioctl_set_regs()
6707 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); in kvm_arch_vcpu_ioctl_set_regs()
6710 kvm_rip_write(vcpu, regs->rip); in kvm_arch_vcpu_ioctl_set_regs()
6711 kvm_set_rflags(vcpu, regs->rflags); in kvm_arch_vcpu_ioctl_set_regs()
6713 vcpu->arch.exception.pending = false; in kvm_arch_vcpu_ioctl_set_regs()
6715 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_regs()
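The two ioctl handlers above move the full general-purpose register set between struct kvm_regs and the vcpu. A hedged userspace sketch of using them follows; vcpu_fd is assumed to be an already-created vcpu file descriptor.

/* Hedged sketch: read, tweak and write back guest registers through the
 * KVM_GET_REGS / KVM_SET_REGS ioctls handled by the functions above. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

int set_entry_point(int vcpu_fd, unsigned long long rip)
{
        struct kvm_regs regs;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
                return -1;
        regs.rip = rip;         /* lands in kvm_rip_write() on the way in */
        regs.rflags = 0x2;      /* bit 1 of RFLAGS is reserved and must be set */
        if (ioctl(vcpu_fd, KVM_SET_REGS, &regs) < 0)
                return -1;
        return 0;
}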
6720 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) in kvm_get_cs_db_l_bits() argument
6724 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_get_cs_db_l_bits()
6730 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
6735 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in kvm_arch_vcpu_ioctl_get_sregs()
6736 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in kvm_arch_vcpu_ioctl_get_sregs()
6737 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in kvm_arch_vcpu_ioctl_get_sregs()
6738 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in kvm_arch_vcpu_ioctl_get_sregs()
6739 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in kvm_arch_vcpu_ioctl_get_sregs()
6740 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in kvm_arch_vcpu_ioctl_get_sregs()
6742 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in kvm_arch_vcpu_ioctl_get_sregs()
6743 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in kvm_arch_vcpu_ioctl_get_sregs()
6745 kvm_x86_ops->get_idt(vcpu, &dt); in kvm_arch_vcpu_ioctl_get_sregs()
6748 kvm_x86_ops->get_gdt(vcpu, &dt); in kvm_arch_vcpu_ioctl_get_sregs()
6752 sregs->cr0 = kvm_read_cr0(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6753 sregs->cr2 = vcpu->arch.cr2; in kvm_arch_vcpu_ioctl_get_sregs()
6754 sregs->cr3 = kvm_read_cr3(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6755 sregs->cr4 = kvm_read_cr4(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6756 sregs->cr8 = kvm_get_cr8(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6757 sregs->efer = vcpu->arch.efer; in kvm_arch_vcpu_ioctl_get_sregs()
6758 sregs->apic_base = kvm_get_apic_base(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
6762 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft) in kvm_arch_vcpu_ioctl_get_sregs()
6763 set_bit(vcpu->arch.interrupt.nr, in kvm_arch_vcpu_ioctl_get_sregs()
6769 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
6772 kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
6773 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
6774 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
6777 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
6782 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
6785 if (!kvm_vcpu_has_lapic(vcpu) && in kvm_arch_vcpu_ioctl_set_mpstate()
6790 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
6791 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
6793 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
6794 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
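kvm_arch_vcpu_ioctl_get_mpstate() and kvm_arch_vcpu_ioctl_set_mpstate() above back the KVM_GET_MP_STATE and KVM_SET_MP_STATE ioctls. A hedged userspace sketch; vcpu_fd is assumed to be an existing vcpu descriptor.

/* Hedged sketch: userspace side of the MP-state ioctls handled above.
 * Marks a halted vcpu runnable again. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

int wake_vcpu(int vcpu_fd)
{
        struct kvm_mp_state mp;

        if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
                return -1;
        if (mp.mp_state == KVM_MP_STATE_HALTED) {
                mp.mp_state = KVM_MP_STATE_RUNNABLE;
                return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
        }
        return 0;
}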
6798 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, in kvm_task_switch() argument
6801 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_task_switch()
6804 init_emulate_ctxt(vcpu); in kvm_task_switch()
6812 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
6813 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
6814 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_task_switch()
6819 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
6827 if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE)) in kvm_arch_vcpu_ioctl_set_sregs()
6832 kvm_x86_ops->set_idt(vcpu, &dt); in kvm_arch_vcpu_ioctl_set_sregs()
6835 kvm_x86_ops->set_gdt(vcpu, &dt); in kvm_arch_vcpu_ioctl_set_sregs()
6837 vcpu->arch.cr2 = sregs->cr2; in kvm_arch_vcpu_ioctl_set_sregs()
6838 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in kvm_arch_vcpu_ioctl_set_sregs()
6839 vcpu->arch.cr3 = sregs->cr3; in kvm_arch_vcpu_ioctl_set_sregs()
6840 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_arch_vcpu_ioctl_set_sregs()
6842 kvm_set_cr8(vcpu, sregs->cr8); in kvm_arch_vcpu_ioctl_set_sregs()
6844 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in kvm_arch_vcpu_ioctl_set_sregs()
6845 kvm_x86_ops->set_efer(vcpu, sregs->efer); in kvm_arch_vcpu_ioctl_set_sregs()
6848 kvm_set_apic_base(vcpu, &apic_base_msr); in kvm_arch_vcpu_ioctl_set_sregs()
6850 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in kvm_arch_vcpu_ioctl_set_sregs()
6851 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); in kvm_arch_vcpu_ioctl_set_sregs()
6852 vcpu->arch.cr0 = sregs->cr0; in kvm_arch_vcpu_ioctl_set_sregs()
6854 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in kvm_arch_vcpu_ioctl_set_sregs()
6855 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); in kvm_arch_vcpu_ioctl_set_sregs()
6857 kvm_update_cpuid(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
6859 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_set_sregs()
6860 if (!is_long_mode(vcpu) && is_pae(vcpu)) { in kvm_arch_vcpu_ioctl_set_sregs()
6861 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in kvm_arch_vcpu_ioctl_set_sregs()
6864 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_set_sregs()
6867 kvm_mmu_reset_context(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
6873 kvm_queue_interrupt(vcpu, pending_vec, false); in kvm_arch_vcpu_ioctl_set_sregs()
6877 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in kvm_arch_vcpu_ioctl_set_sregs()
6878 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in kvm_arch_vcpu_ioctl_set_sregs()
6879 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in kvm_arch_vcpu_ioctl_set_sregs()
6880 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in kvm_arch_vcpu_ioctl_set_sregs()
6881 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in kvm_arch_vcpu_ioctl_set_sregs()
6882 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in kvm_arch_vcpu_ioctl_set_sregs()
6884 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in kvm_arch_vcpu_ioctl_set_sregs()
6885 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in kvm_arch_vcpu_ioctl_set_sregs()
6887 update_cr8_intercept(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
6890 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && in kvm_arch_vcpu_ioctl_set_sregs()
6892 !is_protmode(vcpu)) in kvm_arch_vcpu_ioctl_set_sregs()
6893 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_set_sregs()
6895 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
6900 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
6908 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
6911 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
6913 kvm_queue_exception(vcpu, BP_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
6920 rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
6922 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
6923 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
6924 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
6926 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
6928 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
6929 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
6932 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
6934 kvm_update_dr7(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
6936 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
6937 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
6938 get_segment_base(vcpu, VCPU_SREG_CS); in kvm_arch_vcpu_ioctl_set_guest_debug()
6944 kvm_set_rflags(vcpu, rflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
6946 kvm_x86_ops->update_db_bp_intercept(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
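kvm_arch_vcpu_ioctl_set_guest_debug() above is reached through the KVM_SET_GUEST_DEBUG ioctl. Below is a hedged userspace sketch of enabling single-step, the case kvm_vcpu_check_singlestep() earlier in the listing reports back to userspace as a debug exit.

/* Hedged sketch: ask KVM to single-step the guest.  Each executed guest
 * instruction then returns to userspace with exit_reason KVM_EXIT_DEBUG. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int enable_singlestep(int vcpu_fd)
{
        struct kvm_guest_debug dbg;

        memset(&dbg, 0, sizeof(dbg));
        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}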
6958 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
6965 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
6966 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); in kvm_arch_vcpu_ioctl_translate()
6967 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
6976 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
6979 &vcpu->arch.guest_fpu.state->fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
6993 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
6996 &vcpu->arch.guest_fpu.state->fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
7010 int fx_init(struct kvm_vcpu *vcpu) in fx_init() argument
7014 err = fpu_alloc(&vcpu->arch.guest_fpu); in fx_init()
7018 fpu_finit(&vcpu->arch.guest_fpu); in fx_init()
7020 vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv = in fx_init()
7026 vcpu->arch.xcr0 = XSTATE_FP; in fx_init()
7028 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
7034 static void fx_free(struct kvm_vcpu *vcpu) in fx_free() argument
7036 fpu_free(&vcpu->arch.guest_fpu); in fx_free()
7039 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) in kvm_load_guest_fpu() argument
7041 if (vcpu->guest_fpu_loaded) in kvm_load_guest_fpu()
7049 vcpu->guest_fpu_loaded = 1; in kvm_load_guest_fpu()
7051 fpu_restore_checking(&vcpu->arch.guest_fpu); in kvm_load_guest_fpu()
7055 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) in kvm_put_guest_fpu() argument
7057 if (!vcpu->guest_fpu_loaded) in kvm_put_guest_fpu()
7060 vcpu->guest_fpu_loaded = 0; in kvm_put_guest_fpu()
7061 fpu_save_init(&vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
7063 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
7064 if (!vcpu->arch.eager_fpu) in kvm_put_guest_fpu()
7065 kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); in kvm_put_guest_fpu()
7070 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_free() argument
7072 kvmclock_reset(vcpu); in kvm_arch_vcpu_free()
7074 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_free()
7075 fx_free(vcpu); in kvm_arch_vcpu_free()
7076 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_free()
7082 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
7089 vcpu = kvm_x86_ops->vcpu_create(kvm, id); in kvm_arch_vcpu_create()
7095 kvm_x86_ops->fpu_activate(vcpu); in kvm_arch_vcpu_create()
7096 return vcpu; in kvm_arch_vcpu_create()
7099 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument
7103 vcpu->arch.mtrr_state.have_fixed = 1; in kvm_arch_vcpu_setup()
7104 r = vcpu_load(vcpu); in kvm_arch_vcpu_setup()
7107 kvm_vcpu_reset(vcpu); in kvm_arch_vcpu_setup()
7108 kvm_mmu_setup(vcpu); in kvm_arch_vcpu_setup()
7109 vcpu_put(vcpu); in kvm_arch_vcpu_setup()
7114 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
7117 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
7119 if (vcpu_load(vcpu)) in kvm_arch_vcpu_postcreate()
7124 kvm_write_tsc(vcpu, &msr); in kvm_arch_vcpu_postcreate()
7125 vcpu_put(vcpu); in kvm_arch_vcpu_postcreate()
7131 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
7134 vcpu->arch.apf.msr_val = 0; in kvm_arch_vcpu_destroy()
7136 r = vcpu_load(vcpu); in kvm_arch_vcpu_destroy()
7138 kvm_mmu_unload(vcpu); in kvm_arch_vcpu_destroy()
7139 vcpu_put(vcpu); in kvm_arch_vcpu_destroy()
7141 fx_free(vcpu); in kvm_arch_vcpu_destroy()
7142 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
7145 void kvm_vcpu_reset(struct kvm_vcpu *vcpu) in kvm_vcpu_reset() argument
7147 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
7148 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
7149 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
7150 kvm_clear_interrupt_queue(vcpu); in kvm_vcpu_reset()
7151 kvm_clear_exception_queue(vcpu); in kvm_vcpu_reset()
7153 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
7154 kvm_update_dr0123(vcpu); in kvm_vcpu_reset()
7155 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
7156 kvm_update_dr6(vcpu); in kvm_vcpu_reset()
7157 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
7158 kvm_update_dr7(vcpu); in kvm_vcpu_reset()
7160 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
7162 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_reset()
7163 vcpu->arch.apf.msr_val = 0; in kvm_vcpu_reset()
7164 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
7166 kvmclock_reset(vcpu); in kvm_vcpu_reset()
7168 kvm_clear_async_pf_completion_queue(vcpu); in kvm_vcpu_reset()
7169 kvm_async_pf_hash_reset(vcpu); in kvm_vcpu_reset()
7170 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
7172 kvm_pmu_reset(vcpu); in kvm_vcpu_reset()
7174 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
7175 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
7176 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
7178 kvm_x86_ops->vcpu_reset(vcpu); in kvm_vcpu_reset()
7181 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) in kvm_vcpu_deliver_sipi_vector() argument
7185 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
7188 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
7189 kvm_rip_write(vcpu, 0); in kvm_vcpu_deliver_sipi_vector()
7195 struct kvm_vcpu *vcpu; in kvm_arch_hardware_enable() local
7210 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
7211 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
7212 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
7213 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
7215 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
7216 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
7263 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
7264 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
7265 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
7266 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
7311 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) in kvm_vcpu_compatible() argument
7313 return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); in kvm_vcpu_compatible()
7318 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_init() argument
7324 BUG_ON(vcpu->kvm == NULL); in kvm_arch_vcpu_init()
7325 kvm = vcpu->kvm; in kvm_arch_vcpu_init()
7327 vcpu->arch.pv.pv_unhalted = false; in kvm_arch_vcpu_init()
7328 vcpu->arch.emulate_ctxt.ops = &emulate_ops; in kvm_arch_vcpu_init()
7329 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_init()
7330 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_init()
7332 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_init()
7339 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_init()
7341 kvm_set_tsc_khz(vcpu, max_tsc_khz); in kvm_arch_vcpu_init()
7343 r = kvm_mmu_create(vcpu); in kvm_arch_vcpu_init()
7348 r = kvm_create_lapic(vcpu); in kvm_arch_vcpu_init()
7354 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_init()
7356 if (!vcpu->arch.mce_banks) { in kvm_arch_vcpu_init()
7360 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_init()
7362 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { in kvm_arch_vcpu_init()
7367 r = fx_init(vcpu); in kvm_arch_vcpu_init()
7371 vcpu->arch.ia32_tsc_adjust_msr = 0x0; in kvm_arch_vcpu_init()
7372 vcpu->arch.pv_time_enabled = false; in kvm_arch_vcpu_init()
7374 vcpu->arch.guest_supported_xcr0 = 0; in kvm_arch_vcpu_init()
7375 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_arch_vcpu_init()
7377 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_init()
7379 kvm_async_pf_hash_reset(vcpu); in kvm_arch_vcpu_init()
7380 kvm_pmu_init(vcpu); in kvm_arch_vcpu_init()
7384 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_init()
7386 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_init()
7388 kvm_free_lapic(vcpu); in kvm_arch_vcpu_init()
7390 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_init()
7392 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_init()
7397 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_uninit() argument
7401 kvm_pmu_destroy(vcpu); in kvm_arch_vcpu_uninit()
7402 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_uninit()
7403 kvm_free_lapic(vcpu); in kvm_arch_vcpu_uninit()
7404 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_uninit()
7405 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_uninit()
7406 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_uninit()
7407 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_uninit()
7408 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arch_vcpu_uninit()
7412 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_sched_in() argument
7414 kvm_x86_ops->sched_in(vcpu, cpu); in kvm_arch_sched_in()
7446 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) in kvm_unload_vcpu_mmu() argument
7449 r = vcpu_load(vcpu); in kvm_unload_vcpu_mmu()
7451 kvm_mmu_unload(vcpu); in kvm_unload_vcpu_mmu()
7452 vcpu_put(vcpu); in kvm_unload_vcpu_mmu()
7458 struct kvm_vcpu *vcpu; in kvm_free_vcpus() local
7463 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_free_vcpus()
7464 kvm_clear_async_pf_completion_queue(vcpu); in kvm_free_vcpus()
7465 kvm_unload_vcpu_mmu(vcpu); in kvm_free_vcpus()
7467 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
7468 kvm_arch_vcpu_free(vcpu); in kvm_free_vcpus()
7748 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
7750 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) in kvm_arch_vcpu_runnable()
7751 kvm_x86_ops->check_nested_events(vcpu, false); in kvm_arch_vcpu_runnable()
7753 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_arch_vcpu_runnable()
7754 !vcpu->arch.apf.halted) in kvm_arch_vcpu_runnable()
7755 || !list_empty_careful(&vcpu->async_pf.done) in kvm_arch_vcpu_runnable()
7756 || kvm_apic_has_events(vcpu) in kvm_arch_vcpu_runnable()
7757 || vcpu->arch.pv.pv_unhalted in kvm_arch_vcpu_runnable()
7758 || atomic_read(&vcpu->arch.nmi_queued) || in kvm_arch_vcpu_runnable()
7759 (kvm_arch_interrupt_allowed(vcpu) && in kvm_arch_vcpu_runnable()
7760 kvm_cpu_has_interrupt(vcpu)); in kvm_arch_vcpu_runnable()
7763 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
7765 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
7768 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) in kvm_arch_interrupt_allowed() argument
7770 return kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_interrupt_allowed()
7773 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) in kvm_get_linear_rip() argument
7775 if (is_64_bit_mode(vcpu)) in kvm_get_linear_rip()
7776 return kvm_rip_read(vcpu); in kvm_get_linear_rip()
7777 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + in kvm_get_linear_rip()
7778 kvm_rip_read(vcpu)); in kvm_get_linear_rip()
7782 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) in kvm_is_linear_rip() argument
7784 return kvm_get_linear_rip(vcpu) == linear_rip; in kvm_is_linear_rip()
7788 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) in kvm_get_rflags() argument
7792 rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_get_rflags()
7793 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
7799 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in __kvm_set_rflags() argument
7801 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
7802 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
7804 kvm_x86_ops->set_rflags(vcpu, rflags); in __kvm_set_rflags()
7807 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in kvm_set_rflags() argument
7809 __kvm_set_rflags(vcpu, rflags); in kvm_set_rflags()
7810 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_set_rflags()
7814 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) in kvm_arch_async_page_ready() argument
7818 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
7822 r = kvm_mmu_reload(vcpu); in kvm_arch_async_page_ready()
7826 if (!vcpu->arch.mmu.direct_map && in kvm_arch_async_page_ready()
7827 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) in kvm_arch_async_page_ready()
7830 vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true); in kvm_arch_async_page_ready()
7843 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
7847 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
7850 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
7853 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot() argument
7859 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
7860 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
7866 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn() argument
7868 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
7871 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn() argument
7875 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); in kvm_del_async_pf_gfn()
7877 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
7880 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
7882 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
7889 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
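kvm_add_async_pf_gfn(), kvm_async_pf_gfn_slot() and kvm_del_async_pf_gfn() above implement a small open-addressed hash of outstanding async-page-fault gfns: linear probing, ~0 as the empty marker, and deletion that back-shifts displaced entries instead of leaving tombstones. The standalone sketch below illustrates the same scheme; the table size and hash function are illustrative, not the kernel's.

/* Hedged sketch of the linear-probing table kept per vcpu above. */
#include <stdbool.h>
#include <stdint.h>

#define SLOTS 64u                       /* must be a power of two */
#define EMPTY (~(uint64_t)0)            /* ~0 is reserved as "no entry" */

static uint64_t table[SLOTS];

static void table_init(void)
{
        for (unsigned int i = 0; i < SLOTS; i++)
                table[i] = EMPTY;
}

static unsigned int hash(uint64_t key)
{
        return (unsigned int)((key * 0x9E3779B97F4A7C15ull) >> 58);
}

static void insert(uint64_t key)
{
        unsigned int i = hash(key);

        while (table[i] != EMPTY)       /* probe to the next free slot */
                i = (i + 1) & (SLOTS - 1);
        table[i] = key;
}

static unsigned int slot_of(uint64_t key)
{
        unsigned int i = hash(key);

        while (table[i] != EMPTY && table[i] != key)
                i = (i + 1) & (SLOTS - 1);
        return i;                       /* slot holding key, or an empty slot */
}

static bool contains(uint64_t key)
{
        return table[slot_of(key)] == key;
}

static void delete(uint64_t key)
{
        unsigned int i = slot_of(key), j = i, k;

        if (table[i] == EMPTY)
                return;
        for (;;) {
                table[i] = EMPTY;
                do {
                        j = (j + 1) & (SLOTS - 1);
                        if (table[j] == EMPTY)
                                return;
                        k = hash(table[j]);
                        /* skip entries whose home slot k lies cyclically in
                         * (i, j]; they stay reachable with slot i emptied */
                } while (i < j ? (i < k && k <= j) : (i < k || k <= j));
                table[i] = table[j];    /* back-shift the displaced entry */
                i = j;
        }
}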
7894 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) in apf_put_user() argument
7897 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, in apf_put_user()
7901 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
7907 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
7909 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || in kvm_arch_async_page_not_present()
7910 (vcpu->arch.apf.send_user_only && in kvm_arch_async_page_not_present()
7911 kvm_x86_ops->get_cpl(vcpu) == 0)) in kvm_arch_async_page_not_present()
7912 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in kvm_arch_async_page_not_present()
7913 else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { in kvm_arch_async_page_not_present()
7919 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_not_present()
7923 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
7932 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
7934 if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && in kvm_arch_async_page_present()
7935 !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { in kvm_arch_async_page_present()
7941 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_present()
7943 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
7944 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
7947 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_inject_async_page_present() argument
7949 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) in kvm_arch_can_inject_async_page_present()
7952 return !kvm_event_needs_reinjection(vcpu) && in kvm_arch_can_inject_async_page_present()
7953 kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_can_inject_async_page_present()