Lines matching refs: vcpu (arch/x86/kvm/lapic.c, KVM's local APIC emulation). Each entry gives the source line number, the matching line of code, and the enclosing function; "argument" and "local" flag entries where vcpu is declared as a function parameter or a local variable.
88 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector) in kvm_apic_pending_eoi() argument
90 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_pending_eoi()
161 struct kvm_vcpu *vcpu; in recalculate_apic_map() local
171 kvm_for_each_vcpu(i, vcpu, kvm) { in recalculate_apic_map()
172 struct kvm_lapic *apic = vcpu->arch.apic; in recalculate_apic_map()
176 if (!kvm_apic_present(vcpu)) in recalculate_apic_map()
225 recalculate_apic_map(apic->vcpu->kvm); in apic_set_spiv()
234 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_id()
240 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_ldr()
249 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_x2apic_id()
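
The four recalculate_apic_map() hits above follow one pattern: any register write that can change how interrupts are addressed (SPIV software enable, APIC ID, LDR, x2APIC ID) ends by rebuilding a per-VM lookup map, so delivery can later resolve a destination ID without walking every vcpu. A minimal user-space model of that pattern; the names (struct vm, phys_map, set_id) are hypothetical and only a flat physical-ID table is modeled:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_VCPUS 8

struct apic {
    uint32_t id;   /* physical APIC ID */
    uint32_t ldr;  /* logical destination register (unused in this sketch) */
};

struct vm {
    struct apic apics[MAX_VCPUS];
    struct apic *phys_map[256];  /* APIC ID -> APIC, rebuilt on any change */
};

static void recalculate_map(struct vm *vm)
{
    memset(vm->phys_map, 0, sizeof(vm->phys_map));
    for (int i = 0; i < MAX_VCPUS; i++)
        vm->phys_map[vm->apics[i].id & 0xff] = &vm->apics[i];
}

/* Every setter that affects addressing ends with a map rebuild,
 * mirroring the kvm_apic_set_id()/kvm_apic_set_ldr() hits above. */
static void set_id(struct vm *vm, int vcpu, uint32_t id)
{
    vm->apics[vcpu].id = id;
    recalculate_map(vm);
}

int main(void)
{
    struct vm vm = {0};
    set_id(&vm, 0, 5);
    printf("APIC ID 5 mapped: %s\n", vm.phys_map[5] ? "yes" : "no");
    return 0;
}
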
282 void kvm_apic_set_version(struct kvm_vcpu *vcpu) in kvm_apic_set_version() argument
284 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_version()
288 if (!kvm_vcpu_has_lapic(vcpu)) in kvm_apic_set_version()
291 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); in kvm_apic_set_version()
346 void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir) in kvm_apic_update_irr() argument
348 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_update_irr()
352 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_apic_update_irr()
382 kvm_x86_ops->sync_pir_to_irr(apic->vcpu); in apic_find_highest_irr()
391 struct kvm_vcpu *vcpu; in apic_clear_irr() local
393 vcpu = apic->vcpu; in apic_clear_irr()
395 if (unlikely(kvm_vcpu_apic_vid_enabled(vcpu))) { in apic_clear_irr()
398 kvm_make_request(KVM_REQ_EVENT, vcpu); in apic_clear_irr()
409 struct kvm_vcpu *vcpu; in apic_set_isr() local
414 vcpu = apic->vcpu; in apic_set_isr()
422 kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec); in apic_set_isr()
456 struct kvm_vcpu *vcpu; in apic_clear_isr() local
460 vcpu = apic->vcpu; in apic_clear_isr()
470 kvm_x86_ops->hwapic_isr_update(vcpu->kvm, in apic_clear_isr()
479 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) in kvm_lapic_find_highest_irr() argument
488 if (!kvm_vcpu_has_lapic(vcpu)) in kvm_lapic_find_highest_irr()
490 highest_irr = apic_find_highest_irr(vcpu->arch.apic); in kvm_lapic_find_highest_irr()
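
apic_find_highest_irr() and kvm_lapic_find_highest_irr() above reduce to one operation: scan the 256-bit pending-interrupt bitmap (the IRR) from the top and return the highest set vector, since higher vector numbers have higher priority. A self-contained sketch of that scan, assuming the bitmap is laid out as eight 32-bit words:

#include <stdint.h>
#include <stdio.h>

static int find_highest_vector(const uint32_t irr[8])
{
    /* Scan from the highest word down; the first set bit wins. */
    for (int word = 7; word >= 0; word--)
        if (irr[word])
            return word * 32 + 31 - __builtin_clz(irr[word]);
    return -1;  /* nothing pending */
}

int main(void)
{
    uint32_t irr[8] = {0};
    irr[1] |= 1u << 16;  /* vector 48 pending */
    irr[2] |= 1u << 3;   /* vector 67 pending */
    printf("highest pending vector: %d\n", find_highest_vector(irr));
    return 0;
}
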
499 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, in kvm_apic_set_irq() argument
502 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_irq()
508 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val) in pv_eoi_put_user() argument
511 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val, in pv_eoi_put_user()
515 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val) in pv_eoi_get_user() argument
518 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val, in pv_eoi_get_user()
522 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) in pv_eoi_enabled() argument
524 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; in pv_eoi_enabled()
527 static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) in pv_eoi_get_pending() argument
530 if (pv_eoi_get_user(vcpu, &val) < 0) in pv_eoi_get_pending()
532 (unsigned long long)vcpu->arch.pv_eoi.msr_val); in pv_eoi_get_pending()
536 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) in pv_eoi_set_pending() argument
538 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) { in pv_eoi_set_pending()
540 (unsigned long long)vcpu->arch.pv_eoi.msr_val); in pv_eoi_set_pending()
543 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); in pv_eoi_set_pending()
546 static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) in pv_eoi_clr_pending() argument
548 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) { in pv_eoi_clr_pending()
550 (unsigned long long)vcpu->arch.pv_eoi.msr_val); in pv_eoi_clr_pending()
553 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); in pv_eoi_clr_pending()
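
The six pv_eoi_* helpers above implement KVM's paravirtual EOI handshake: a single byte in guest memory, located through a guest-programmed MSR, lets the guest acknowledge an interrupt without a trapping write to the EOI register. A minimal user-space model of that handshake follows; the struct and flag names are illustrative rather than KVM's ABI, and the real code accesses the byte via kvm_read_guest_cached()/kvm_write_guest_cached():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_ENABLED     (1ull << 0)
#define PV_EOI_ENABLED  1
#define PV_EOI_DISABLED 0

struct vcpu {
    uint64_t pv_eoi_msr;  /* guest-programmed MSR value */
    uint8_t shared_byte;  /* byte at the guest address held in the MSR */
    bool attention;       /* host remembers it armed PV EOI */
};

static bool pv_eoi_enabled(struct vcpu *v)
{
    return v->pv_eoi_msr & MSR_ENABLED;
}

/* Host arms PV EOI before injecting an interrupt whose EOI
 * needs no further host-side action. */
static void pv_eoi_set_pending(struct vcpu *v)
{
    v->shared_byte = PV_EOI_ENABLED;
    v->attention = true;
}

/* Guest EOI path: clear the byte instead of writing the EOI register. */
static void guest_eoi(struct vcpu *v)
{
    v->shared_byte = PV_EOI_DISABLED;
}

/* Host, on the next exit: if the guest already cleared the byte,
 * complete the EOI bookkeeping without the guest having trapped. */
static void host_sync(struct vcpu *v)
{
    bool pending;

    if (!v->attention)
        return;
    pending = (v->shared_byte == PV_EOI_ENABLED);
    v->shared_byte = PV_EOI_DISABLED;  /* re-armed on the next entry */
    v->attention = false;
    if (!pending)
        printf("guest EOI'd via the shared byte; host does EOI work now\n");
}

int main(void)
{
    struct vcpu v = { .pv_eoi_msr = MSR_ENABLED };

    if (pv_eoi_enabled(&v))
        pv_eoi_set_pending(&v);
    guest_eoi(&v);
    host_sync(&v);
    return 0;
}
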
577 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in apic_update_ppr()
630 apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR)); in kvm_apic_match_logical_addr()
651 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, in kvm_apic_match_dest() argument
654 struct kvm_lapic *target = vcpu->arch.apic; in kvm_apic_match_dest()
693 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map); in kvm_irq_delivery_to_apic_fast()
740 else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0) in kvm_irq_delivery_to_apic_fast()
753 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map); in kvm_irq_delivery_to_apic_fast()
784 if (dst && kvm_apic_present(dst->vcpu)) in kvm_intr_is_single_vcpu_fast()
785 *dest_vcpu = dst->vcpu; in kvm_intr_is_single_vcpu_fast()
807 if (dst && kvm_apic_present(dst->vcpu)) in kvm_intr_is_single_vcpu_fast()
808 *dest_vcpu = dst->vcpu; in kvm_intr_is_single_vcpu_fast()
828 struct kvm_vcpu *vcpu = apic->vcpu; in __apic_accept_irq() local
830 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, in __apic_accept_irq()
834 vcpu->arch.apic_arb_prio++; in __apic_accept_irq()
846 __set_bit(vcpu->vcpu_id, dest_map); in __apic_accept_irq()
856 kvm_x86_ops->deliver_posted_interrupt(vcpu, vector); in __apic_accept_irq()
860 kvm_make_request(KVM_REQ_EVENT, vcpu); in __apic_accept_irq()
861 kvm_vcpu_kick(vcpu); in __apic_accept_irq()
867 vcpu->arch.pv.pv_unhalted = 1; in __apic_accept_irq()
868 kvm_make_request(KVM_REQ_EVENT, vcpu); in __apic_accept_irq()
869 kvm_vcpu_kick(vcpu); in __apic_accept_irq()
874 kvm_make_request(KVM_REQ_SMI, vcpu); in __apic_accept_irq()
875 kvm_vcpu_kick(vcpu); in __apic_accept_irq()
880 kvm_inject_nmi(vcpu); in __apic_accept_irq()
881 kvm_vcpu_kick(vcpu); in __apic_accept_irq()
892 kvm_make_request(KVM_REQ_EVENT, vcpu); in __apic_accept_irq()
893 kvm_vcpu_kick(vcpu); in __apic_accept_irq()
896 vcpu->vcpu_id); in __apic_accept_irq()
902 vcpu->vcpu_id, vector); in __apic_accept_irq()
908 kvm_make_request(KVM_REQ_EVENT, vcpu); in __apic_accept_irq()
909 kvm_vcpu_kick(vcpu); in __apic_accept_irq()
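
The __apic_accept_irq() hits above are one switch over the delivery mode: fixed interrupts mark the vector pending and notify the vcpu, NMI and SMI queue their own request flags, and INIT/STARTUP are latched for kvm_apic_accept_events() to sequence later; every arm ends with a kick so a sleeping vcpu re-evaluates its state. A loose model of that dispatch, with illustrative request-flag names:

#include <stdio.h>

enum dmode { DM_FIXED, DM_SMI, DM_NMI, DM_INIT, DM_STARTUP };

#define REQ_EVENT (1u << 0)  /* re-evaluate pending interrupts */
#define REQ_SMI   (1u << 1)
#define REQ_NMI   (1u << 2)

static void accept_irq(enum dmode mode, int vector, unsigned *requests)
{
    switch (mode) {
    case DM_FIXED:
        /* the real code also sets the vector in the IRR (omitted here) */
        *requests |= REQ_EVENT;
        break;
    case DM_SMI:
        *requests |= REQ_SMI;
        break;
    case DM_NMI:
        *requests |= REQ_NMI;
        break;
    case DM_INIT:
    case DM_STARTUP:
        /* latched as pending events for kvm_apic_accept_events() */
        *requests |= REQ_EVENT;
        break;
    }
    printf("kick vcpu: mode=%d vector=%#x requests=%#x\n",
           mode, vector, *requests);
}

int main(void)
{
    unsigned requests = 0;

    accept_irq(DM_FIXED, 0x31, &requests);
    accept_irq(DM_NMI, 2, &requests);
    return 0;
}
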
935 return test_bit(vector, (ulong *)apic->vcpu->arch.eoi_exit_bitmap); in kvm_ioapic_handles_vector()
947 if (irqchip_split(apic->vcpu->kvm)) { in kvm_ioapic_send_eoi()
948 apic->vcpu->arch.pending_ioapic_eoi = vector; in kvm_ioapic_send_eoi()
949 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu); in kvm_ioapic_send_eoi()
958 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode); in kvm_ioapic_send_eoi()
978 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in apic_set_eoi()
986 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector) in kvm_apic_set_eoi_accelerated() argument
988 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_eoi_accelerated()
993 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in kvm_apic_set_eoi_accelerated()
1025 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); in apic_send_ipi()
1054 struct kvm_vcpu *vcpu = apic->vcpu; in __report_tpr_access() local
1055 struct kvm_run *run = vcpu->run; in __report_tpr_access()
1057 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu); in __report_tpr_access()
1058 run->tpr_access.rip = kvm_rip_read(vcpu); in __report_tpr_access()
1064 if (apic->vcpu->arch.tpr_access_reporting) in report_tpr_access()
1157 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this, in apic_mmio_read() argument
1197 struct kvm_vcpu *vcpu = apic->vcpu; in apic_timer_expired() local
1198 wait_queue_head_t *q = &vcpu->wq; in apic_timer_expired()
1205 kvm_set_pending_timer(vcpu); in apic_timer_expired()
1219 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu) in lapic_timer_int_injected() argument
1221 struct kvm_lapic *apic = vcpu->arch.apic; in lapic_timer_int_injected()
1237 void wait_lapic_expire(struct kvm_vcpu *vcpu) in wait_lapic_expire() argument
1239 struct kvm_lapic *apic = vcpu->arch.apic; in wait_lapic_expire()
1242 if (!kvm_vcpu_has_lapic(vcpu)) in wait_lapic_expire()
1248 if (!lapic_timer_int_injected(vcpu)) in wait_lapic_expire()
1253 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in wait_lapic_expire()
1254 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline); in wait_lapic_expire()
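
wait_lapic_expire() above narrows the TSC-deadline timer's error: when the timer interrupt is injected slightly before the programmed deadline (the expiry is computed with an advance), the host busy-waits until the guest-visible TSC reaches the deadline so the guest observes an on-time expiry. An x86-only user-space sketch of that comparison, with __rdtsc() standing in for kvm_read_l1_tsc():

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

/* Spin until the (guest-visible) TSC reaches the programmed deadline. */
static void wait_lapic_expire(uint64_t tsc_deadline)
{
    uint64_t guest_tsc = __rdtsc();  /* stand-in for kvm_read_l1_tsc() */

    while (guest_tsc < tsc_deadline)
        guest_tsc = __rdtsc();
}

int main(void)
{
    uint64_t deadline = __rdtsc() + 10000;  /* a few microseconds out */

    wait_lapic_expire(deadline);
    printf("deadline reached\n");
    return 0;
}
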
1287 apic->vcpu->vcpu_id, in start_apic_timer()
1311 struct kvm_vcpu *vcpu = apic->vcpu; in start_apic_timer() local
1312 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; in start_apic_timer()
1321 guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in start_apic_timer()
1344 "for cpu %d\n", apic->vcpu->vcpu_id); in apic_manage_nmi_watchdog()
1345 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); in apic_manage_nmi_watchdog()
1347 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); in apic_manage_nmi_watchdog()
1384 recalculate_apic_map(apic->vcpu->kvm); in apic_reg_write()
1483 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, in apic_mmio_write() argument
1516 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) in kvm_lapic_set_eoi() argument
1518 if (kvm_vcpu_has_lapic(vcpu)) in kvm_lapic_set_eoi()
1519 apic_reg_write(vcpu->arch.apic, APIC_EOI, 0); in kvm_lapic_set_eoi()
1524 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) in kvm_apic_write_nodecode() argument
1531 apic_reg_read(vcpu->arch.apic, offset, 4, &val); in kvm_apic_write_nodecode()
1534 apic_reg_write(vcpu->arch.apic, offset, val); in kvm_apic_write_nodecode()
1538 void kvm_free_lapic(struct kvm_vcpu *vcpu) in kvm_free_lapic() argument
1540 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_free_lapic()
1542 if (!vcpu->arch.apic) in kvm_free_lapic()
1547 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) in kvm_free_lapic()
1565 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) in kvm_get_lapic_tscdeadline_msr() argument
1567 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_get_lapic_tscdeadline_msr()
1569 if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || in kvm_get_lapic_tscdeadline_msr()
1576 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) in kvm_set_lapic_tscdeadline_msr() argument
1578 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_set_lapic_tscdeadline_msr()
1580 if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || in kvm_set_lapic_tscdeadline_msr()
1589 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) in kvm_lapic_set_tpr() argument
1591 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_set_tpr()
1593 if (!kvm_vcpu_has_lapic(vcpu)) in kvm_lapic_set_tpr()
1600 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) in kvm_lapic_get_cr8() argument
1604 if (!kvm_vcpu_has_lapic(vcpu)) in kvm_lapic_get_cr8()
1607 tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI); in kvm_lapic_get_cr8()
1612 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) in kvm_lapic_set_base() argument
1614 u64 old_value = vcpu->arch.apic_base; in kvm_lapic_set_base()
1615 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_set_base()
1619 vcpu->arch.apic_base = value; in kvm_lapic_set_base()
1623 vcpu->arch.apic_base = value; in kvm_lapic_set_base()
1631 recalculate_apic_map(vcpu->kvm); in kvm_lapic_set_base()
1636 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); in kvm_lapic_set_base()
1637 kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true); in kvm_lapic_set_base()
1639 kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false); in kvm_lapic_set_base()
1642 apic->base_address = apic->vcpu->arch.apic_base & in kvm_lapic_set_base()
1651 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address); in kvm_lapic_set_base()
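
kvm_lapic_set_base() above decodes the IA32_APICBASE MSR: bit 8 is the BSP flag, bit 11 enables the xAPIC (which is why toggling it triggers recalculate_apic_map()), bit 10 selects x2APIC mode (the trigger for set_virtual_x2apic_mode() and the x2APIC ID assignment), and bits 12 and up give the MMIO base address. A small decode sketch of that layout:

#include <stdint.h>
#include <stdio.h>

#define APICBASE_BSP    (1ull << 8)
#define APICBASE_EXTD   (1ull << 10)  /* x2APIC mode */
#define APICBASE_ENABLE (1ull << 11)
#define APICBASE_BASE   0xfffff000ull

int main(void)
{
    /* 0xfee00000 is the architectural default APIC base. */
    uint64_t msr = 0xfee00000ull | APICBASE_ENABLE | APICBASE_BSP;

    printf("enabled: %d, x2apic: %d, bsp: %d, base: %#llx\n",
           !!(msr & APICBASE_ENABLE), !!(msr & APICBASE_EXTD),
           !!(msr & APICBASE_BSP),
           (unsigned long long)(msr & APICBASE_BASE));
    return 0;
}
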
1655 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) in kvm_lapic_reset() argument
1662 ASSERT(vcpu); in kvm_lapic_reset()
1663 apic = vcpu->arch.apic; in kvm_lapic_reset()
1670 kvm_apic_set_id(apic, vcpu->vcpu_id); in kvm_lapic_reset()
1671 kvm_apic_set_version(apic->vcpu); in kvm_lapic_reset()
1676 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) in kvm_lapic_reset()
1696 apic->irr_pending = kvm_vcpu_apic_vid_enabled(vcpu); in kvm_lapic_reset()
1701 if (kvm_vcpu_is_bsp(vcpu)) in kvm_lapic_reset()
1702 kvm_lapic_set_base(vcpu, in kvm_lapic_reset()
1703 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP); in kvm_lapic_reset()
1704 vcpu->arch.pv_eoi.msr_val = 0; in kvm_lapic_reset()
1707 vcpu->arch.apic_arb_prio = 0; in kvm_lapic_reset()
1708 vcpu->arch.apic_attention = 0; in kvm_lapic_reset()
1712 vcpu, kvm_apic_id(apic), in kvm_lapic_reset()
1713 vcpu->arch.apic_base, apic->base_address); in kvm_lapic_reset()
1727 int apic_has_pending_timer(struct kvm_vcpu *vcpu) in apic_has_pending_timer() argument
1729 struct kvm_lapic *apic = vcpu->arch.apic; in apic_has_pending_timer()
1731 if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) && in apic_has_pending_timer()
1753 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu) in kvm_apic_nmi_wd_deliver() argument
1755 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_nmi_wd_deliver()
1780 int kvm_create_lapic(struct kvm_vcpu *vcpu) in kvm_create_lapic() argument
1784 ASSERT(vcpu != NULL); in kvm_create_lapic()
1785 apic_debug("apic_init %d\n", vcpu->vcpu_id); in kvm_create_lapic()
1791 vcpu->arch.apic = apic; in kvm_create_lapic()
1796 vcpu->vcpu_id); in kvm_create_lapic()
1799 apic->vcpu = vcpu; in kvm_create_lapic()
1809 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; in kvm_create_lapic()
1810 kvm_lapic_set_base(vcpu, in kvm_create_lapic()
1814 kvm_lapic_reset(vcpu, false); in kvm_create_lapic()
1824 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) in kvm_apic_has_interrupt() argument
1826 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_has_interrupt()
1829 if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic)) in kvm_apic_has_interrupt()
1840 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu) in kvm_apic_accept_pic_intr() argument
1842 u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0); in kvm_apic_accept_pic_intr()
1845 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in kvm_apic_accept_pic_intr()
1853 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) in kvm_inject_apic_timer_irqs() argument
1855 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_inject_apic_timer_irqs()
1857 if (!kvm_vcpu_has_lapic(vcpu)) in kvm_inject_apic_timer_irqs()
1868 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) in kvm_get_apic_interrupt() argument
1870 int vector = kvm_apic_has_interrupt(vcpu); in kvm_get_apic_interrupt()
1871 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_get_apic_interrupt()
1889 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, in kvm_apic_post_state_restore() argument
1892 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_post_state_restore()
1894 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base); in kvm_apic_post_state_restore()
1897 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); in kvm_apic_post_state_restore()
1900 kvm_apic_set_version(vcpu); in kvm_apic_post_state_restore()
1913 kvm_x86_ops->hwapic_irr_update(vcpu, in kvm_apic_post_state_restore()
1916 kvm_x86_ops->hwapic_isr_update(vcpu->kvm, in kvm_apic_post_state_restore()
1918 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_apic_post_state_restore()
1919 if (ioapic_in_kernel(vcpu->kvm)) in kvm_apic_post_state_restore()
1920 kvm_rtc_eoi_tracking_restore_one(vcpu); in kvm_apic_post_state_restore()
1922 vcpu->arch.apic_arb_prio = 0; in kvm_apic_post_state_restore()
1925 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) in __kvm_migrate_apic_timer() argument
1929 if (!kvm_vcpu_has_lapic(vcpu)) in __kvm_migrate_apic_timer()
1932 timer = &vcpu->arch.apic->lapic_timer.timer; in __kvm_migrate_apic_timer()
1944 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, in apic_sync_pv_eoi_from_guest() argument
1960 BUG_ON(!pv_eoi_enabled(vcpu)); in apic_sync_pv_eoi_from_guest()
1961 pending = pv_eoi_get_pending(vcpu); in apic_sync_pv_eoi_from_guest()
1967 pv_eoi_clr_pending(vcpu); in apic_sync_pv_eoi_from_guest()
1974 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) in kvm_lapic_sync_from_vapic() argument
1978 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) in kvm_lapic_sync_from_vapic()
1979 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); in kvm_lapic_sync_from_vapic()
1981 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) in kvm_lapic_sync_from_vapic()
1984 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, in kvm_lapic_sync_from_vapic()
1988 apic_set_tpr(vcpu->arch.apic, data & 0xff); in kvm_lapic_sync_from_vapic()
1997 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu, in apic_sync_pv_eoi_to_guest() argument
2000 if (!pv_eoi_enabled(vcpu) || in apic_sync_pv_eoi_to_guest()
2014 pv_eoi_set_pending(apic->vcpu); in apic_sync_pv_eoi_to_guest()
2017 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) in kvm_lapic_sync_to_vapic() argument
2021 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_sync_to_vapic()
2023 apic_sync_pv_eoi_to_guest(vcpu, apic); in kvm_lapic_sync_to_vapic()
2025 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) in kvm_lapic_sync_to_vapic()
2037 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, in kvm_lapic_sync_to_vapic()
2041 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) in kvm_lapic_set_vapic_addr() argument
2044 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_lapic_set_vapic_addr()
2045 &vcpu->arch.apic->vapic_cache, in kvm_lapic_set_vapic_addr()
2048 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); in kvm_lapic_set_vapic_addr()
2050 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); in kvm_lapic_set_vapic_addr()
2053 vcpu->arch.apic->vapic_addr = vapic_addr; in kvm_lapic_set_vapic_addr()
2057 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) in kvm_x2apic_msr_write() argument
2059 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_x2apic_msr_write()
2062 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) in kvm_x2apic_msr_write()
2074 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) in kvm_x2apic_msr_read() argument
2076 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_x2apic_msr_read()
2079 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) in kvm_x2apic_msr_read()
2098 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) in kvm_hv_vapic_msr_write() argument
2100 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_hv_vapic_msr_write()
2102 if (!kvm_vcpu_has_lapic(vcpu)) in kvm_hv_vapic_msr_write()
2111 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) in kvm_hv_vapic_msr_read() argument
2113 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_hv_vapic_msr_read()
2116 if (!kvm_vcpu_has_lapic(vcpu)) in kvm_hv_vapic_msr_read()
2129 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) in kvm_lapic_enable_pv_eoi() argument
2135 vcpu->arch.pv_eoi.msr_val = data; in kvm_lapic_enable_pv_eoi()
2136 if (!pv_eoi_enabled(vcpu)) in kvm_lapic_enable_pv_eoi()
2138 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, in kvm_lapic_enable_pv_eoi()
2142 void kvm_apic_accept_events(struct kvm_vcpu *vcpu) in kvm_apic_accept_events() argument
2144 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_accept_events()
2148 if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) in kvm_apic_accept_events()
2156 if (is_smm(vcpu)) { in kvm_apic_accept_events()
2157 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED); in kvm_apic_accept_events()
2165 kvm_lapic_reset(vcpu, true); in kvm_apic_accept_events()
2166 kvm_vcpu_reset(vcpu, true); in kvm_apic_accept_events()
2167 if (kvm_vcpu_is_bsp(apic->vcpu)) in kvm_apic_accept_events()
2168 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_apic_accept_events()
2170 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_apic_accept_events()
2173 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in kvm_apic_accept_events()
2178 vcpu->vcpu_id, sipi_vector); in kvm_apic_accept_events()
2179 kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); in kvm_apic_accept_events()
2180 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_apic_accept_events()
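
kvm_apic_accept_events() above sequences the classic INIT/SIPI bring-up: a latched INIT resets the vcpu and parks an application processor in INIT_RECEIVED (the BSP goes straight to RUNNABLE), a subsequent SIPI supplies the start vector and makes the AP runnable, and events are deferred while the vcpu is in SMM. A compact model of that state machine, with illustrative flag names:

#include <stdio.h>

enum mp_state { MP_RUNNABLE, MP_INIT_RECEIVED };

#define PENDING_INIT (1u << 0)
#define PENDING_SIPI (1u << 1)

struct vcpu {
    int is_bsp;
    unsigned pending_events;
    unsigned sipi_vector;
    enum mp_state mp_state;
};

static void accept_events(struct vcpu *v)
{
    if (v->pending_events & PENDING_INIT) {
        v->pending_events &= ~PENDING_INIT;
        /* lapic and vcpu reset happen here in the real code */
        v->mp_state = v->is_bsp ? MP_RUNNABLE : MP_INIT_RECEIVED;
    }
    if ((v->pending_events & PENDING_SIPI) &&
        v->mp_state == MP_INIT_RECEIVED) {
        v->pending_events &= ~PENDING_SIPI;
        printf("start AP at vector %#x\n", v->sipi_vector);
        v->mp_state = MP_RUNNABLE;
    }
}

int main(void)
{
    struct vcpu ap = { .pending_events = PENDING_INIT | PENDING_SIPI,
                       .sipi_vector = 0x9a };

    accept_events(&ap);
    return 0;
}
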