Lines matching refs: apic

78 static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)  in apic_set_reg()  argument
80 *((u32 *) (apic->regs + reg_off)) = val; in apic_set_reg()
90 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_pending_eoi() local
92 return apic_test_vector(vector, apic->regs + APIC_ISR) || in kvm_apic_pending_eoi()
93 apic_test_vector(vector, apic->regs + APIC_IRR); in kvm_apic_pending_eoi()
119 static inline int apic_enabled(struct kvm_lapic *apic) in apic_enabled() argument
121 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic); in apic_enabled()
131 static inline int kvm_apic_id(struct kvm_lapic *apic) in kvm_apic_id() argument
133 return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; in kvm_apic_id()
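
The accessors above treat apic->regs as a page of 32-bit registers addressed by byte offset, with the xAPIC ID held in bits 31:24 of the APIC_ID register. A minimal standalone sketch of that layout (simplified stand-in struct, APIC_ID at offset 0x20 as in the architectural register map, not the kernel's own types):

    #include <stdint.h>

    #define APIC_ID_OFF 0x20                      /* byte offset of the ID register */

    struct toy_lapic { uint8_t regs[4096]; };     /* stand-in for kvm_lapic->regs */

    static inline void toy_set_reg(struct toy_lapic *apic, int off, uint32_t val)
    {
        *(uint32_t *)(apic->regs + off) = val;    /* 32-bit store at a byte offset */
    }

    static inline uint32_t toy_get_reg(struct toy_lapic *apic, int off)
    {
        return *(uint32_t *)(apic->regs + off);
    }

    static inline int toy_apic_id(struct toy_lapic *apic)
    {
        return (toy_get_reg(apic, APIC_ID_OFF) >> 24) & 0xff;   /* xAPIC ID in bits 31:24 */
    }
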
172 struct kvm_lapic *apic = vcpu->arch.apic; in recalculate_apic_map() local
179 aid = kvm_apic_id(apic); in recalculate_apic_map()
180 ldr = kvm_apic_get_reg(apic, APIC_LDR); in recalculate_apic_map()
183 new->phys_map[aid] = apic; in recalculate_apic_map()
185 if (apic_x2apic_mode(apic)) { in recalculate_apic_map()
189 if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT) in recalculate_apic_map()
201 new->logical_map[cid][ffs(lid) - 1] = apic; in recalculate_apic_map()
215 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) in apic_set_spiv() argument
219 apic_set_reg(apic, APIC_SPIV, val); in apic_set_spiv()
221 if (enabled != apic->sw_enabled) { in apic_set_spiv()
222 apic->sw_enabled = enabled; in apic_set_spiv()
225 recalculate_apic_map(apic->vcpu->kvm); in apic_set_spiv()
231 static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id) in kvm_apic_set_id() argument
233 apic_set_reg(apic, APIC_ID, id << 24); in kvm_apic_set_id()
234 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_id()
237 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id) in kvm_apic_set_ldr() argument
239 apic_set_reg(apic, APIC_LDR, id); in kvm_apic_set_ldr()
240 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_ldr()
243 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type) in apic_lvt_enabled() argument
245 return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED); in apic_lvt_enabled()
248 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type) in apic_lvt_vector() argument
250 return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK; in apic_lvt_vector()
253 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic) in apic_lvtt_oneshot() argument
255 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT; in apic_lvtt_oneshot()
258 static inline int apic_lvtt_period(struct kvm_lapic *apic) in apic_lvtt_period() argument
260 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC; in apic_lvtt_period()
263 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic) in apic_lvtt_tscdeadline() argument
265 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE; in apic_lvtt_tscdeadline()
275 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_version() local
282 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); in kvm_apic_set_version()
285 apic_set_reg(apic, APIC_LVR, v); in kvm_apic_set_version()
339 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_update_irr() local
341 __kvm_apic_update_irr(pir, apic->regs); in kvm_apic_update_irr()
347 static inline void apic_set_irr(int vec, struct kvm_lapic *apic) in apic_set_irr() argument
349 apic_set_vector(vec, apic->regs + APIC_IRR); in apic_set_irr()
354 apic->irr_pending = true; in apic_set_irr()
357 static inline int apic_search_irr(struct kvm_lapic *apic) in apic_search_irr() argument
359 return find_highest_vector(apic->regs + APIC_IRR); in apic_search_irr()
362 static inline int apic_find_highest_irr(struct kvm_lapic *apic) in apic_find_highest_irr() argument
370 if (!apic->irr_pending) in apic_find_highest_irr()
373 kvm_x86_ops->sync_pir_to_irr(apic->vcpu); in apic_find_highest_irr()
374 result = apic_search_irr(apic); in apic_find_highest_irr()
380 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) in apic_clear_irr() argument
384 vcpu = apic->vcpu; in apic_clear_irr()
388 apic_clear_vector(vec, apic->regs + APIC_IRR); in apic_clear_irr()
391 apic->irr_pending = false; in apic_clear_irr()
392 apic_clear_vector(vec, apic->regs + APIC_IRR); in apic_clear_irr()
393 if (apic_search_irr(apic) != -1) in apic_clear_irr()
394 apic->irr_pending = true; in apic_clear_irr()
398 static inline void apic_set_isr(int vec, struct kvm_lapic *apic) in apic_set_isr() argument
402 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) in apic_set_isr()
405 vcpu = apic->vcpu; in apic_set_isr()
415 ++apic->isr_count; in apic_set_isr()
416 BUG_ON(apic->isr_count > MAX_APIC_VECTOR); in apic_set_isr()
422 apic->highest_isr_cache = vec; in apic_set_isr()
426 static inline int apic_find_highest_isr(struct kvm_lapic *apic) in apic_find_highest_isr() argument
434 if (!apic->isr_count) in apic_find_highest_isr()
436 if (likely(apic->highest_isr_cache != -1)) in apic_find_highest_isr()
437 return apic->highest_isr_cache; in apic_find_highest_isr()
439 result = find_highest_vector(apic->regs + APIC_ISR); in apic_find_highest_isr()
445 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) in apic_clear_isr() argument
448 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) in apic_clear_isr()
451 vcpu = apic->vcpu; in apic_clear_isr()
462 apic_find_highest_isr(apic)); in apic_clear_isr()
464 --apic->isr_count; in apic_clear_isr()
465 BUG_ON(apic->isr_count < 0); in apic_clear_isr()
466 apic->highest_isr_cache = -1; in apic_clear_isr()
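
apic_search_irr and apic_find_highest_isr both boil down to scanning a 256-bit vector bitmap that the APIC stores as eight 32-bit registers spaced 0x10 bytes apart (IRR, ISR and TMR share this layout). A rough standalone sketch of that scan, using a compiler builtin in place of the kernel's fls():

    #include <stdint.h>

    #define MAX_APIC_VECTOR   256
    #define VECTORS_PER_REG   32
    /* vector v sits in bit (v % 32) of the register at byte offset (v / 32) * 0x10 */
    #define REG_POS(v)        (((v) >> 5) << 4)

    static int toy_find_highest_vector(const uint8_t *bitmap)
    {
        for (int vec = MAX_APIC_VECTOR - VECTORS_PER_REG; vec >= 0; vec -= VECTORS_PER_REG) {
            uint32_t reg = *(const uint32_t *)(bitmap + REG_POS(vec));
            if (reg)
                return vec + 31 - __builtin_clz(reg);   /* highest set bit in this group */
        }
        return -1;                                      /* no vector pending */
    }
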
481 highest_irr = apic_find_highest_irr(vcpu->arch.apic); in kvm_lapic_find_highest_irr()
486 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
493 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_irq() local
495 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector, in kvm_apic_set_irq()
549 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_update_tmr() local
553 apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]); in kvm_apic_update_tmr()
556 static void apic_update_ppr(struct kvm_lapic *apic) in apic_update_ppr() argument
561 old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI); in apic_update_ppr()
562 tpr = kvm_apic_get_reg(apic, APIC_TASKPRI); in apic_update_ppr()
563 isr = apic_find_highest_isr(apic); in apic_update_ppr()
572 apic, ppr, isr, isrv); in apic_update_ppr()
575 apic_set_reg(apic, APIC_PROCPRI, ppr); in apic_update_ppr()
577 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in apic_update_ppr()
581 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) in apic_set_tpr() argument
583 apic_set_reg(apic, APIC_TASKPRI, tpr); in apic_set_tpr()
584 apic_update_ppr(apic); in apic_set_tpr()
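
apic_update_ppr derives the processor priority from the task priority and the highest in-service vector: priorities compare by their upper nibble (the priority class), and PPR takes whichever of the two is higher. A hedged sketch of just that arithmetic:

    #include <stdint.h>

    /* ppr = max(tpr, class of highest in-service vector), compared on the upper nibble */
    static uint32_t toy_compute_ppr(uint32_t tpr, int highest_isr)
    {
        uint32_t isrv = (highest_isr != -1) ? (uint32_t)highest_isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
            return tpr & 0xff;       /* task priority dominates */
        return isrv & 0xf0;          /* in-service class dominates; sub-class reads as 0 */
    }
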
587 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda) in kvm_apic_broadcast() argument
589 if (apic_x2apic_mode(apic)) in kvm_apic_broadcast()
595 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda) in kvm_apic_match_physical_addr() argument
597 if (kvm_apic_broadcast(apic, mda)) in kvm_apic_match_physical_addr()
600 if (apic_x2apic_mode(apic)) in kvm_apic_match_physical_addr()
601 return mda == kvm_apic_id(apic); in kvm_apic_match_physical_addr()
603 return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic)); in kvm_apic_match_physical_addr()
606 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda) in kvm_apic_match_logical_addr() argument
610 if (kvm_apic_broadcast(apic, mda)) in kvm_apic_match_logical_addr()
613 logical_id = kvm_apic_get_reg(apic, APIC_LDR); in kvm_apic_match_logical_addr()
615 if (apic_x2apic_mode(apic)) in kvm_apic_match_logical_addr()
622 switch (kvm_apic_get_reg(apic, APIC_DFR)) { in kvm_apic_match_logical_addr()
630 apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR)); in kvm_apic_match_logical_addr()
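
kvm_apic_match_logical_addr decodes the logical destination per mode: in xAPIC the logical ID is LDR bits 31:24 and APIC_DFR selects flat (bitmask) or cluster (4-bit cluster ID plus 4-bit member mask) matching, while x2APIC uses a 16-bit cluster and 16-bit mask. A sketch of the xAPIC case only, assuming broadcast and x2APIC have already been handled:

    #include <stdbool.h>
    #include <stdint.h>

    static bool toy_match_logical_xapic(uint32_t ldr, bool flat_dfr, uint8_t mda)
    {
        uint8_t logical_id = ldr >> 24;               /* xAPIC logical ID lives in LDR[31:24] */

        if (flat_dfr)
            return (logical_id & mda) != 0;           /* flat: any shared bit is a match */

        return (logical_id >> 4) == (mda >> 4) &&     /* cluster: same cluster ID ...      */
               (logical_id & mda & 0xf) != 0;         /* ... and overlapping member bits   */
    }
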
654 struct kvm_lapic *target = vcpu->arch.apic; in kvm_apic_match_dest()
764 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, in __apic_accept_irq() argument
769 struct kvm_vcpu *vcpu = apic->vcpu; in __apic_accept_irq()
778 if (unlikely(!apic_enabled(apic))) in __apic_accept_irq()
789 apic_set_irr(vector, apic); in __apic_accept_irq()
817 apic->pending_events = (1UL << KVM_APIC_INIT); in __apic_accept_irq()
833 apic->sipi_vector = vector; in __apic_accept_irq()
836 set_bit(KVM_APIC_SIPI, &apic->pending_events); in __apic_accept_irq()
862 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) in kvm_ioapic_send_eoi() argument
864 if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { in kvm_ioapic_send_eoi()
866 if (apic_test_vector(vector, apic->regs + APIC_TMR)) in kvm_ioapic_send_eoi()
870 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode); in kvm_ioapic_send_eoi()
874 static int apic_set_eoi(struct kvm_lapic *apic) in apic_set_eoi() argument
876 int vector = apic_find_highest_isr(apic); in apic_set_eoi()
878 trace_kvm_eoi(apic, vector); in apic_set_eoi()
887 apic_clear_isr(vector, apic); in apic_set_eoi()
888 apic_update_ppr(apic); in apic_set_eoi()
890 kvm_ioapic_send_eoi(apic, vector); in apic_set_eoi()
891 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in apic_set_eoi()
901 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_eoi_accelerated() local
903 trace_kvm_eoi(apic, vector); in kvm_apic_set_eoi_accelerated()
905 kvm_ioapic_send_eoi(apic, vector); in kvm_apic_set_eoi_accelerated()
906 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in kvm_apic_set_eoi_accelerated()
910 static void apic_send_ipi(struct kvm_lapic *apic) in apic_send_ipi() argument
912 u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR); in apic_send_ipi()
913 u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2); in apic_send_ipi()
922 if (apic_x2apic_mode(apic)) in apic_send_ipi()
936 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); in apic_send_ipi()
939 static u32 apic_get_tmcct(struct kvm_lapic *apic) in apic_get_tmcct() argument
945 ASSERT(apic != NULL); in apic_get_tmcct()
948 if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || in apic_get_tmcct()
949 apic->lapic_timer.period == 0) in apic_get_tmcct()
952 remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); in apic_get_tmcct()
956 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period); in apic_get_tmcct()
958 (APIC_BUS_CYCLE_NS * apic->divide_count)); in apic_get_tmcct()
963 static void __report_tpr_access(struct kvm_lapic *apic, bool write) in __report_tpr_access() argument
965 struct kvm_vcpu *vcpu = apic->vcpu; in __report_tpr_access()
973 static inline void report_tpr_access(struct kvm_lapic *apic, bool write) in report_tpr_access() argument
975 if (apic->vcpu->arch.tpr_access_reporting) in report_tpr_access()
976 __report_tpr_access(apic, write); in report_tpr_access()
979 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset) in __apic_read() argument
988 if (apic_x2apic_mode(apic)) in __apic_read()
989 val = kvm_apic_id(apic); in __apic_read()
991 val = kvm_apic_id(apic) << 24; in __apic_read()
998 if (apic_lvtt_tscdeadline(apic)) in __apic_read()
1001 val = apic_get_tmcct(apic); in __apic_read()
1004 apic_update_ppr(apic); in __apic_read()
1005 val = kvm_apic_get_reg(apic, offset); in __apic_read()
1008 report_tpr_access(apic, false); in __apic_read()
1011 val = kvm_apic_get_reg(apic, offset); in __apic_read()
1023 static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len, in apic_reg_read() argument
1043 result = __apic_read(apic, offset & ~0xf); in apic_reg_read()
1061 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) in apic_mmio_in_range() argument
1063 return kvm_apic_hw_enabled(apic) && in apic_mmio_in_range()
1064 addr >= apic->base_address && in apic_mmio_in_range()
1065 addr < apic->base_address + LAPIC_MMIO_LENGTH; in apic_mmio_in_range()
1071 struct kvm_lapic *apic = to_lapic(this); in apic_mmio_read() local
1072 u32 offset = address - apic->base_address; in apic_mmio_read()
1074 if (!apic_mmio_in_range(apic, address)) in apic_mmio_read()
1077 apic_reg_read(apic, offset, len, data); in apic_mmio_read()
1082 static void update_divide_count(struct kvm_lapic *apic) in update_divide_count() argument
1086 tdcr = kvm_apic_get_reg(apic, APIC_TDCR); in update_divide_count()
1089 apic->divide_count = 0x1 << (tmp2 & 0x7); in update_divide_count()
1092 apic->divide_count); in update_divide_count()
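
update_divide_count decodes APIC_TDCR: bits 0, 1 and 3 select a power-of-two timer divider, with the all-ones encoding meaning divide-by-1. A standalone sketch of the decode:

    #include <stdint.h>

    /* TDCR bits 0,1,3: 000 -> /2, 001 -> /4, ... 110 -> /128, 111 -> /1 */
    static uint32_t toy_divide_count(uint32_t tdcr)
    {
        uint32_t bits  = tdcr & 0xf;
        uint32_t shift = ((bits & 0x3) | ((bits & 0x8) >> 1)) + 1;

        return 1u << (shift & 0x7);   /* & 0x7 wraps the 111 encoding back to divide-by-1 */
    }
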
1095 static void apic_update_lvtt(struct kvm_lapic *apic) in apic_update_lvtt() argument
1097 u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) & in apic_update_lvtt()
1098 apic->lapic_timer.timer_mode_mask; in apic_update_lvtt()
1100 if (apic->lapic_timer.timer_mode != timer_mode) { in apic_update_lvtt()
1101 apic->lapic_timer.timer_mode = timer_mode; in apic_update_lvtt()
1102 hrtimer_cancel(&apic->lapic_timer.timer); in apic_update_lvtt()
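
apic_update_lvtt caches the timer mode straight from LVTT: bits 18:17 select one-shot, periodic or TSC-deadline, and timer_mode_mask hides the deadline bit when the guest's CPUID does not advertise it. A small sketch of that decode (constants follow the architectural encoding):

    #include <stdint.h>

    #define LVT_TIMER_ONESHOT      (0u << 17)
    #define LVT_TIMER_PERIODIC     (1u << 17)
    #define LVT_TIMER_TSCDEADLINE  (2u << 17)

    static uint32_t toy_lvtt_timer_mode(uint32_t lvtt, int tsc_deadline_supported)
    {
        /* mask covers both mode bits only when TSC-deadline is exposed to the guest */
        uint32_t mask = tsc_deadline_supported ? (3u << 17) : (1u << 17);

        return lvtt & mask;
    }
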
1106 static void apic_timer_expired(struct kvm_lapic *apic) in apic_timer_expired() argument
1108 struct kvm_vcpu *vcpu = apic->vcpu; in apic_timer_expired()
1110 struct kvm_timer *ktimer = &apic->lapic_timer; in apic_timer_expired()
1112 if (atomic_read(&apic->lapic_timer.pending)) in apic_timer_expired()
1115 atomic_inc(&apic->lapic_timer.pending); in apic_timer_expired()
1121 if (apic_lvtt_tscdeadline(apic)) in apic_timer_expired()
1132 struct kvm_lapic *apic = vcpu->arch.apic; in lapic_timer_int_injected() local
1133 u32 reg = kvm_apic_get_reg(apic, APIC_LVTT); in lapic_timer_int_injected()
1135 if (kvm_apic_hw_enabled(apic)) { in lapic_timer_int_injected()
1137 void *bitmap = apic->regs + APIC_ISR; in lapic_timer_int_injected()
1140 bitmap = apic->regs + APIC_IRR; in lapic_timer_int_injected()
1150 struct kvm_lapic *apic = vcpu->arch.apic; in wait_lapic_expire() local
1156 if (apic->lapic_timer.expired_tscdeadline == 0) in wait_lapic_expire()
1162 tsc_deadline = apic->lapic_timer.expired_tscdeadline; in wait_lapic_expire()
1163 apic->lapic_timer.expired_tscdeadline = 0; in wait_lapic_expire()
1172 static void start_apic_timer(struct kvm_lapic *apic) in start_apic_timer() argument
1176 atomic_set(&apic->lapic_timer.pending, 0); in start_apic_timer()
1178 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { in start_apic_timer()
1180 now = apic->lapic_timer.timer.base->get_time(); in start_apic_timer()
1181 apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT) in start_apic_timer()
1182 * APIC_BUS_CYCLE_NS * apic->divide_count; in start_apic_timer()
1184 if (!apic->lapic_timer.period) in start_apic_timer()
1191 if (apic_lvtt_period(apic)) { in start_apic_timer()
1194 if (apic->lapic_timer.period < min_period) { in start_apic_timer()
1198 apic->vcpu->vcpu_id, in start_apic_timer()
1199 apic->lapic_timer.period, min_period); in start_apic_timer()
1200 apic->lapic_timer.period = min_period; in start_apic_timer()
1204 hrtimer_start(&apic->lapic_timer.timer, in start_apic_timer()
1205 ktime_add_ns(now, apic->lapic_timer.period), in start_apic_timer()
1213 kvm_apic_get_reg(apic, APIC_TMICT), in start_apic_timer()
1214 apic->lapic_timer.period, in start_apic_timer()
1216 apic->lapic_timer.period))); in start_apic_timer()
1217 } else if (apic_lvtt_tscdeadline(apic)) { in start_apic_timer()
1219 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; in start_apic_timer()
1222 struct kvm_vcpu *vcpu = apic->vcpu; in start_apic_timer()
1231 now = apic->lapic_timer.timer.base->get_time(); in start_apic_timer()
1238 hrtimer_start(&apic->lapic_timer.timer, in start_apic_timer()
1241 apic_timer_expired(apic); in start_apic_timer()
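
For one-shot and periodic modes, start_apic_timer turns the initial count into nanoseconds as TMICT * APIC_BUS_CYCLE_NS * divide_count, and apic_get_tmcct runs the same relation in reverse on the hrtimer's remaining time (after clamping negative values to zero and, for periodic mode, reducing modulo the period). A rough worked sketch, assuming KVM's one-nanosecond bus cycle:

    #include <stdint.h>

    #define APIC_BUS_CYCLE_NS 1ULL     /* KVM models one APIC bus cycle per nanosecond */

    /* Programmed timer length in ns for one-shot/periodic mode. */
    static uint64_t toy_timer_period_ns(uint32_t tmict, uint32_t divide_count)
    {
        return (uint64_t)tmict * APIC_BUS_CYCLE_NS * divide_count;
    }

    /* Current-count register value for a given remaining time (already clamped/modded). */
    static uint32_t toy_tmcct(uint64_t remaining_ns, uint32_t divide_count)
    {
        return (uint32_t)(remaining_ns / (APIC_BUS_CYCLE_NS * divide_count));
    }
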
1247 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) in apic_manage_nmi_watchdog() argument
1249 int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0)); in apic_manage_nmi_watchdog()
1254 "for cpu %d\n", apic->vcpu->vcpu_id); in apic_manage_nmi_watchdog()
1255 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); in apic_manage_nmi_watchdog()
1258 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); in apic_manage_nmi_watchdog()
1261 static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) in apic_reg_write() argument
1269 if (!apic_x2apic_mode(apic)) in apic_reg_write()
1270 kvm_apic_set_id(apic, val >> 24); in apic_reg_write()
1276 report_tpr_access(apic, true); in apic_reg_write()
1277 apic_set_tpr(apic, val & 0xff); in apic_reg_write()
1281 apic_set_eoi(apic); in apic_reg_write()
1285 if (!apic_x2apic_mode(apic)) in apic_reg_write()
1286 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK); in apic_reg_write()
1292 if (!apic_x2apic_mode(apic)) { in apic_reg_write()
1293 apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF); in apic_reg_write()
1294 recalculate_apic_map(apic->vcpu->kvm); in apic_reg_write()
1301 if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI) in apic_reg_write()
1303 apic_set_spiv(apic, val & mask); in apic_reg_write()
1309 lvt_val = kvm_apic_get_reg(apic, in apic_reg_write()
1311 apic_set_reg(apic, APIC_LVTT + 0x10 * i, in apic_reg_write()
1314 apic_update_lvtt(apic); in apic_reg_write()
1315 atomic_set(&apic->lapic_timer.pending, 0); in apic_reg_write()
1322 apic_set_reg(apic, APIC_ICR, val & ~(1 << 12)); in apic_reg_write()
1323 apic_send_ipi(apic); in apic_reg_write()
1327 if (!apic_x2apic_mode(apic)) in apic_reg_write()
1329 apic_set_reg(apic, APIC_ICR2, val); in apic_reg_write()
1333 apic_manage_nmi_watchdog(apic, val); in apic_reg_write()
1339 if (!kvm_apic_sw_enabled(apic)) in apic_reg_write()
1343 apic_set_reg(apic, reg, val); in apic_reg_write()
1348 if (!kvm_apic_sw_enabled(apic)) in apic_reg_write()
1350 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); in apic_reg_write()
1351 apic_set_reg(apic, APIC_LVTT, val); in apic_reg_write()
1352 apic_update_lvtt(apic); in apic_reg_write()
1356 if (apic_lvtt_tscdeadline(apic)) in apic_reg_write()
1359 hrtimer_cancel(&apic->lapic_timer.timer); in apic_reg_write()
1360 apic_set_reg(apic, APIC_TMICT, val); in apic_reg_write()
1361 start_apic_timer(apic); in apic_reg_write()
1367 apic_set_reg(apic, APIC_TDCR, val); in apic_reg_write()
1368 update_divide_count(apic); in apic_reg_write()
1372 if (apic_x2apic_mode(apic) && val != 0) { in apic_reg_write()
1379 if (apic_x2apic_mode(apic)) { in apic_reg_write()
1380 apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff)); in apic_reg_write()
1396 struct kvm_lapic *apic = to_lapic(this); in apic_mmio_write() local
1397 unsigned int offset = address - apic->base_address; in apic_mmio_write()
1400 if (!apic_mmio_in_range(apic, address)) in apic_mmio_write()
1421 apic_reg_write(apic, offset & 0xff0, val); in apic_mmio_write()
1429 apic_reg_write(vcpu->arch.apic, APIC_EOI, 0); in kvm_lapic_set_eoi()
1441 apic_reg_read(vcpu->arch.apic, offset, 4, &val); in kvm_apic_write_nodecode()
1444 apic_reg_write(vcpu->arch.apic, offset, val); in kvm_apic_write_nodecode()
1450 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_free_lapic() local
1452 if (!vcpu->arch.apic) in kvm_free_lapic()
1455 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_free_lapic()
1460 if (!apic->sw_enabled) in kvm_free_lapic()
1463 if (apic->regs) in kvm_free_lapic()
1464 free_page((unsigned long)apic->regs); in kvm_free_lapic()
1466 kfree(apic); in kvm_free_lapic()
1477 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_get_lapic_tscdeadline_msr() local
1479 if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || in kvm_get_lapic_tscdeadline_msr()
1480 apic_lvtt_period(apic)) in kvm_get_lapic_tscdeadline_msr()
1483 return apic->lapic_timer.tscdeadline; in kvm_get_lapic_tscdeadline_msr()
1488 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_set_lapic_tscdeadline_msr() local
1490 if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || in kvm_set_lapic_tscdeadline_msr()
1491 apic_lvtt_period(apic)) in kvm_set_lapic_tscdeadline_msr()
1494 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_set_lapic_tscdeadline_msr()
1495 apic->lapic_timer.tscdeadline = data; in kvm_set_lapic_tscdeadline_msr()
1496 start_apic_timer(apic); in kvm_set_lapic_tscdeadline_msr()
1501 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_set_tpr() local
1506 apic_set_tpr(apic, ((cr8 & 0x0f) << 4) in kvm_lapic_set_tpr()
1507 | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4)); in kvm_lapic_set_tpr()
1517 tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI); in kvm_lapic_get_cr8()
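
kvm_lapic_set_tpr and kvm_lapic_get_cr8 translate between the 4-bit CR8 value and the 8-bit task priority: CR8 is the upper nibble of TPR, with bit 2 of the old TPR preserved on the write path as shown above. A minimal sketch of the mapping:

    #include <stdint.h>

    static uint32_t toy_tpr_from_cr8(uint64_t cr8, uint32_t old_tpr)
    {
        return ((cr8 & 0x0f) << 4) | (old_tpr & 4);   /* CR8 -> TPR[7:4], keep TPR bit 2 */
    }

    static uint64_t toy_cr8_from_tpr(uint32_t tpr)
    {
        return (tpr & 0xf0) >> 4;                     /* TPR[7:4] -> CR8 */
    }
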
1525 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_set_base() local
1527 if (!apic) { in kvm_lapic_set_base()
1546 u32 id = kvm_apic_id(apic); in kvm_lapic_set_base()
1548 kvm_apic_set_ldr(apic, ldr); in kvm_lapic_set_base()
1554 apic->base_address = apic->vcpu->arch.apic_base & in kvm_lapic_set_base()
1558 apic->base_address != APIC_DEFAULT_PHYS_BASE) in kvm_lapic_set_base()
1563 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address); in kvm_lapic_set_base()
1569 struct kvm_lapic *apic; in kvm_lapic_reset() local
1575 apic = vcpu->arch.apic; in kvm_lapic_reset()
1576 ASSERT(apic != NULL); in kvm_lapic_reset()
1579 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_lapic_reset()
1581 kvm_apic_set_id(apic, vcpu->vcpu_id); in kvm_lapic_reset()
1582 kvm_apic_set_version(apic->vcpu); in kvm_lapic_reset()
1585 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); in kvm_lapic_reset()
1586 apic_update_lvtt(apic); in kvm_lapic_reset()
1587 apic_set_reg(apic, APIC_LVT0, in kvm_lapic_reset()
1590 apic_set_reg(apic, APIC_DFR, 0xffffffffU); in kvm_lapic_reset()
1591 apic_set_spiv(apic, 0xff); in kvm_lapic_reset()
1592 apic_set_reg(apic, APIC_TASKPRI, 0); in kvm_lapic_reset()
1593 kvm_apic_set_ldr(apic, 0); in kvm_lapic_reset()
1594 apic_set_reg(apic, APIC_ESR, 0); in kvm_lapic_reset()
1595 apic_set_reg(apic, APIC_ICR, 0); in kvm_lapic_reset()
1596 apic_set_reg(apic, APIC_ICR2, 0); in kvm_lapic_reset()
1597 apic_set_reg(apic, APIC_TDCR, 0); in kvm_lapic_reset()
1598 apic_set_reg(apic, APIC_TMICT, 0); in kvm_lapic_reset()
1600 apic_set_reg(apic, APIC_IRR + 0x10 * i, 0); in kvm_lapic_reset()
1601 apic_set_reg(apic, APIC_ISR + 0x10 * i, 0); in kvm_lapic_reset()
1602 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); in kvm_lapic_reset()
1604 apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); in kvm_lapic_reset()
1605 apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0; in kvm_lapic_reset()
1606 apic->highest_isr_cache = -1; in kvm_lapic_reset()
1607 update_divide_count(apic); in kvm_lapic_reset()
1608 atomic_set(&apic->lapic_timer.pending, 0); in kvm_lapic_reset()
1613 apic_update_ppr(apic); in kvm_lapic_reset()
1620 vcpu, kvm_apic_id(apic), in kvm_lapic_reset()
1621 vcpu->arch.apic_base, apic->base_address); in kvm_lapic_reset()
1630 static bool lapic_is_periodic(struct kvm_lapic *apic) in lapic_is_periodic() argument
1632 return apic_lvtt_period(apic); in lapic_is_periodic()
1637 struct kvm_lapic *apic = vcpu->arch.apic; in apic_has_pending_timer() local
1639 if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) && in apic_has_pending_timer()
1640 apic_lvt_enabled(apic, APIC_LVTT)) in apic_has_pending_timer()
1641 return atomic_read(&apic->lapic_timer.pending); in apic_has_pending_timer()
1646 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) in kvm_apic_local_deliver() argument
1648 u32 reg = kvm_apic_get_reg(apic, lvt_type); in kvm_apic_local_deliver()
1651 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { in kvm_apic_local_deliver()
1655 return __apic_accept_irq(apic, mode, vector, 1, trig_mode, in kvm_apic_local_deliver()
1663 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_nmi_wd_deliver() local
1665 if (apic) in kvm_apic_nmi_wd_deliver()
1666 kvm_apic_local_deliver(apic, APIC_LVT0); in kvm_apic_nmi_wd_deliver()
1677 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer); in apic_timer_fn() local
1679 apic_timer_expired(apic); in apic_timer_fn()
1681 if (lapic_is_periodic(apic)) { in apic_timer_fn()
1690 struct kvm_lapic *apic; in kvm_create_lapic() local
1695 apic = kzalloc(sizeof(*apic), GFP_KERNEL); in kvm_create_lapic()
1696 if (!apic) in kvm_create_lapic()
1699 vcpu->arch.apic = apic; in kvm_create_lapic()
1701 apic->regs = (void *)get_zeroed_page(GFP_KERNEL); in kvm_create_lapic()
1702 if (!apic->regs) { in kvm_create_lapic()
1707 apic->vcpu = vcpu; in kvm_create_lapic()
1709 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, in kvm_create_lapic()
1711 apic->lapic_timer.timer.function = apic_timer_fn; in kvm_create_lapic()
1723 kvm_iodevice_init(&apic->dev, &apic_mmio_ops); in kvm_create_lapic()
1727 kfree(apic); in kvm_create_lapic()
1734 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_has_interrupt() local
1737 if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic)) in kvm_apic_has_interrupt()
1740 apic_update_ppr(apic); in kvm_apic_has_interrupt()
1741 highest_irr = apic_find_highest_irr(apic); in kvm_apic_has_interrupt()
1743 ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI))) in kvm_apic_has_interrupt()
1750 u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0); in kvm_apic_accept_pic_intr()
1753 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in kvm_apic_accept_pic_intr()
1763 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_inject_apic_timer_irqs() local
1768 if (atomic_read(&apic->lapic_timer.pending) > 0) { in kvm_inject_apic_timer_irqs()
1769 kvm_apic_local_deliver(apic, APIC_LVTT); in kvm_inject_apic_timer_irqs()
1770 if (apic_lvtt_tscdeadline(apic)) in kvm_inject_apic_timer_irqs()
1771 apic->lapic_timer.tscdeadline = 0; in kvm_inject_apic_timer_irqs()
1772 atomic_set(&apic->lapic_timer.pending, 0); in kvm_inject_apic_timer_irqs()
1779 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_get_apic_interrupt() local
1791 apic_set_isr(vector, apic); in kvm_get_apic_interrupt()
1792 apic_update_ppr(apic); in kvm_get_apic_interrupt()
1793 apic_clear_irr(vector, apic); in kvm_get_apic_interrupt()
1800 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_post_state_restore() local
1804 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV))); in kvm_apic_post_state_restore()
1805 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); in kvm_apic_post_state_restore()
1807 kvm_apic_set_id(apic, kvm_apic_id(apic)); in kvm_apic_post_state_restore()
1810 apic_update_ppr(apic); in kvm_apic_post_state_restore()
1811 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_apic_post_state_restore()
1812 apic_update_lvtt(apic); in kvm_apic_post_state_restore()
1813 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); in kvm_apic_post_state_restore()
1814 update_divide_count(apic); in kvm_apic_post_state_restore()
1815 start_apic_timer(apic); in kvm_apic_post_state_restore()
1816 apic->irr_pending = true; in kvm_apic_post_state_restore()
1817 apic->isr_count = kvm_x86_ops->hwapic_isr_update ? in kvm_apic_post_state_restore()
1818 1 : count_vectors(apic->regs + APIC_ISR); in kvm_apic_post_state_restore()
1819 apic->highest_isr_cache = -1; in kvm_apic_post_state_restore()
1822 apic_find_highest_irr(apic)); in kvm_apic_post_state_restore()
1825 apic_find_highest_isr(apic)); in kvm_apic_post_state_restore()
1837 timer = &vcpu->arch.apic->lapic_timer.timer; in __kvm_migrate_apic_timer()
1850 struct kvm_lapic *apic) in apic_sync_pv_eoi_from_guest() argument
1875 vector = apic_set_eoi(apic); in apic_sync_pv_eoi_from_guest()
1876 trace_kvm_pv_eoi(apic, vector); in apic_sync_pv_eoi_from_guest()
1884 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); in kvm_lapic_sync_from_vapic()
1889 kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, in kvm_lapic_sync_from_vapic()
1892 apic_set_tpr(vcpu->arch.apic, data & 0xff); in kvm_lapic_sync_from_vapic()
1902 struct kvm_lapic *apic) in apic_sync_pv_eoi_to_guest() argument
1906 apic->irr_pending || in apic_sync_pv_eoi_to_guest()
1908 apic->highest_isr_cache == -1 || in apic_sync_pv_eoi_to_guest()
1910 kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) { in apic_sync_pv_eoi_to_guest()
1918 pv_eoi_set_pending(apic->vcpu); in apic_sync_pv_eoi_to_guest()
1925 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_sync_to_vapic() local
1927 apic_sync_pv_eoi_to_guest(vcpu, apic); in kvm_lapic_sync_to_vapic()
1932 tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff; in kvm_lapic_sync_to_vapic()
1933 max_irr = apic_find_highest_irr(apic); in kvm_lapic_sync_to_vapic()
1936 max_isr = apic_find_highest_isr(apic); in kvm_lapic_sync_to_vapic()
1941 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, in kvm_lapic_sync_to_vapic()
1949 &vcpu->arch.apic->vapic_cache, in kvm_lapic_set_vapic_addr()
1957 vcpu->arch.apic->vapic_addr = vapic_addr; in kvm_lapic_set_vapic_addr()
1963 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_x2apic_msr_write() local
1966 if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic)) in kvm_x2apic_msr_write()
1974 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); in kvm_x2apic_msr_write()
1975 return apic_reg_write(apic, reg, (u32)data); in kvm_x2apic_msr_write()
1980 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_x2apic_msr_read() local
1983 if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic)) in kvm_x2apic_msr_read()
1992 if (apic_reg_read(apic, reg, 4, &low)) in kvm_x2apic_msr_read()
1995 apic_reg_read(apic, APIC_ICR2, 4, &high); in kvm_x2apic_msr_read()
2004 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_hv_vapic_msr_write() local
2011 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); in kvm_hv_vapic_msr_write()
2012 return apic_reg_write(apic, reg, (u32)data); in kvm_hv_vapic_msr_write()
2017 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_hv_vapic_msr_read() local
2023 if (apic_reg_read(apic, reg, 4, &low)) in kvm_hv_vapic_msr_read()
2026 apic_reg_read(apic, APIC_ICR2, 4, &high); in kvm_hv_vapic_msr_read()
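
Both the x2APIC MSR path and the Hyper-V vAPIC MSR path map an MSR number onto the same register byte offsets used by apic_reg_read/apic_reg_write, and the 64-bit x2APIC ICR is split back into the xAPIC ICR/ICR2 pair. A hedged sketch of the offset math (constants follow the architectural layout):

    #include <stdint.h>

    #define APIC_BASE_MSR 0x800        /* first x2APIC MSR, maps to register offset 0 */
    #define APIC_ICR      0x300        /* ICR low half in the xAPIC register layout   */
    #define APIC_ICR2     0x310        /* ICR high half (destination)                 */

    /* Each x2APIC MSR corresponds to one 0x10-byte register slot. */
    static uint32_t toy_x2apic_msr_to_offset(uint32_t msr)
    {
        return (msr - APIC_BASE_MSR) << 4;
    }

    /* A 64-bit ICR write is routed as two 32-bit register writes. */
    static void toy_split_icr(uint64_t data, uint32_t *icr_low, uint32_t *icr2_high)
    {
        *icr_low   = (uint32_t)data;
        *icr2_high = (uint32_t)(data >> 32);
    }
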
2048 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_accept_events() local
2052 if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) in kvm_apic_accept_events()
2055 pe = xchg(&apic->pending_events, 0); in kvm_apic_accept_events()
2060 if (kvm_vcpu_is_bsp(apic->vcpu)) in kvm_apic_accept_events()
2069 sipi_vector = apic->sipi_vector; in kvm_apic_accept_events()