Lines matching refs:apic (KVM x86 local APIC emulation, arch/x86/kvm/lapic.c)

78 static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)  in apic_set_reg()  argument
80 *((u32 *) (apic->regs + reg_off)) = val; in apic_set_reg()
90 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_pending_eoi() local
92 return apic_test_vector(vector, apic->regs + APIC_ISR) || in kvm_apic_pending_eoi()
93 apic_test_vector(vector, apic->regs + APIC_IRR); in kvm_apic_pending_eoi()
119 static inline int apic_enabled(struct kvm_lapic *apic) in apic_enabled() argument
121 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic); in apic_enabled()
131 static inline int kvm_apic_id(struct kvm_lapic *apic) in kvm_apic_id() argument
133 return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; in kvm_apic_id()
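
The accessors above treat the virtual APIC as a plain 4 KiB page of 32-bit registers indexed by byte offset, with the 8-bit xAPIC ID kept in bits 31:24 of the ID register. A minimal standalone sketch of that access pattern, using a hypothetical mini_lapic type rather than the kernel structures:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MINI_APIC_ID 0x20            /* byte offset of the ID register */

    struct mini_lapic {
        uint8_t regs[4096];              /* one page of memory-mapped registers */
    };

    static inline void mini_set_reg(struct mini_lapic *apic, int off, uint32_t val)
    {
        memcpy(apic->regs + off, &val, sizeof(val));   /* *(u32 *)(regs + off) = val */
    }

    static inline uint32_t mini_get_reg(struct mini_lapic *apic, int off)
    {
        uint32_t val;
        memcpy(&val, apic->regs + off, sizeof(val));
        return val;
    }

    static inline int mini_apic_id(struct mini_lapic *apic)
    {
        /* xAPIC keeps the physical APIC ID in bits 31:24 of APIC_ID */
        return (mini_get_reg(apic, MINI_APIC_ID) >> 24) & 0xff;
    }

    int main(void)
    {
        struct mini_lapic apic = { { 0 } };

        mini_set_reg(&apic, MINI_APIC_ID, 3u << 24);
        printf("APIC ID = %d\n", mini_apic_id(&apic));   /* prints 3 */
        return 0;
    }
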
172 struct kvm_lapic *apic = vcpu->arch.apic; in recalculate_apic_map() local
179 aid = kvm_apic_id(apic); in recalculate_apic_map()
180 ldr = kvm_apic_get_reg(apic, APIC_LDR); in recalculate_apic_map()
183 new->phys_map[aid] = apic; in recalculate_apic_map()
185 if (apic_x2apic_mode(apic)) { in recalculate_apic_map()
189 if (kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT) in recalculate_apic_map()
201 new->logical_map[cid][ffs(lid) - 1] = apic; in recalculate_apic_map()
215 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) in apic_set_spiv() argument
219 apic_set_reg(apic, APIC_SPIV, val); in apic_set_spiv()
221 if (enabled != apic->sw_enabled) { in apic_set_spiv()
222 apic->sw_enabled = enabled; in apic_set_spiv()
225 recalculate_apic_map(apic->vcpu->kvm); in apic_set_spiv()
231 static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id) in kvm_apic_set_id() argument
233 apic_set_reg(apic, APIC_ID, id << 24); in kvm_apic_set_id()
234 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_id()
237 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id) in kvm_apic_set_ldr() argument
239 apic_set_reg(apic, APIC_LDR, id); in kvm_apic_set_ldr()
240 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_ldr()
243 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u8 id) in kvm_apic_set_x2apic_id() argument
247 apic_set_reg(apic, APIC_ID, id << 24); in kvm_apic_set_x2apic_id()
248 apic_set_reg(apic, APIC_LDR, ldr); in kvm_apic_set_x2apic_id()
249 recalculate_apic_map(apic->vcpu->kvm); in kvm_apic_set_x2apic_id()
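
kvm_apic_set_x2apic_id() also derives the logical destination register: in x2APIC mode the LDR is read-only and architecturally fixed as (cluster << 16) | (1 << position), with cluster = id >> 4 and position = id & 0xf. A standalone sketch of that derivation (helper name invented, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* x2APIC logical ID: bits 31:16 hold the cluster (id >> 4),
     * bits 15:0 hold a one-hot position within the cluster (id & 0xf). */
    static uint32_t x2apic_ldr_from_id(uint8_t id)
    {
        return ((uint32_t)(id >> 4) << 16) | (1u << (id & 0xf));
    }

    int main(void)
    {
        for (int id = 0; id < 34; id += 17)
            printf("id %2d -> ldr 0x%08x\n", id, (unsigned)x2apic_ldr_from_id(id));
        return 0;
    }
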
252 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type) in apic_lvt_enabled() argument
254 return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED); in apic_lvt_enabled()
257 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type) in apic_lvt_vector() argument
259 return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK; in apic_lvt_vector()
262 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic) in apic_lvtt_oneshot() argument
264 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT; in apic_lvtt_oneshot()
267 static inline int apic_lvtt_period(struct kvm_lapic *apic) in apic_lvtt_period() argument
269 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC; in apic_lvtt_period()
272 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic) in apic_lvtt_tscdeadline() argument
274 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE; in apic_lvtt_tscdeadline()
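
The LVT helpers only look at two fields of a local-vector-table entry: the mask bit (bit 16, APIC_LVT_MASKED) and the vector in bits 7:0 (APIC_VECTOR_MASK); the timer mode stored separately in lapic_timer selects one-shot, periodic, or TSC-deadline operation. A small illustrative decoder for those two fields, with layout per the SDM and hypothetical identifiers:

    #include <stdint.h>
    #include <stdio.h>

    #define LVT_VECTOR_MASK 0xffu        /* bits 7:0 */
    #define LVT_MASKED      (1u << 16)   /* bit 16   */

    static int lvt_enabled(uint32_t lvt) { return !(lvt & LVT_MASKED); }
    static int lvt_vector(uint32_t lvt)  { return lvt & LVT_VECTOR_MASK; }

    int main(void)
    {
        uint32_t lvtt = LVT_MASKED | 0xef;            /* masked, vector 0xef */
        printf("enabled=%d vector=0x%x\n", lvt_enabled(lvtt), lvt_vector(lvtt));

        lvtt &= ~LVT_MASKED;                          /* guest unmasks the entry */
        printf("enabled=%d vector=0x%x\n", lvt_enabled(lvtt), lvt_vector(lvtt));
        return 0;
    }
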
284 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_version() local
291 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); in kvm_apic_set_version()
294 apic_set_reg(apic, APIC_LVR, v); in kvm_apic_set_version()
348 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_update_irr() local
350 __kvm_apic_update_irr(pir, apic->regs); in kvm_apic_update_irr()
356 static inline void apic_set_irr(int vec, struct kvm_lapic *apic) in apic_set_irr() argument
358 apic_set_vector(vec, apic->regs + APIC_IRR); in apic_set_irr()
363 apic->irr_pending = true; in apic_set_irr()
366 static inline int apic_search_irr(struct kvm_lapic *apic) in apic_search_irr() argument
368 return find_highest_vector(apic->regs + APIC_IRR); in apic_search_irr()
371 static inline int apic_find_highest_irr(struct kvm_lapic *apic) in apic_find_highest_irr() argument
379 if (!apic->irr_pending) in apic_find_highest_irr()
382 kvm_x86_ops->sync_pir_to_irr(apic->vcpu); in apic_find_highest_irr()
383 result = apic_search_irr(apic); in apic_find_highest_irr()
389 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) in apic_clear_irr() argument
393 vcpu = apic->vcpu; in apic_clear_irr()
397 apic_clear_vector(vec, apic->regs + APIC_IRR); in apic_clear_irr()
400 apic->irr_pending = false; in apic_clear_irr()
401 apic_clear_vector(vec, apic->regs + APIC_IRR); in apic_clear_irr()
402 if (apic_search_irr(apic) != -1) in apic_clear_irr()
403 apic->irr_pending = true; in apic_clear_irr()
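
APIC_IRR (like ISR and TMR) is a 256-bit bitmap spread over eight 32-bit registers spaced 16 bytes apart, so a vector's bit lives in the word at offset 0x10 * (vec >> 5), bit (vec & 0x1f). A standalone sketch of setting a vector and scanning for the highest pending one, assuming that layout (not the kernel's find_highest_vector()):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Eight 16-byte register slots cover vectors 0..255; only the first
     * 32-bit word of each slot carries bits, matching the MMIO layout. */
    static uint32_t irr[8 * 4];

    static void set_vector(int vec)
    {
        irr[4 * (vec >> 5)] |= 1u << (vec & 0x1f);
    }

    static int find_highest_pending(void)
    {
        for (int slot = 7; slot >= 0; slot--) {
            uint32_t word = irr[4 * slot];
            if (word)
                return slot * 32 + (31 - __builtin_clz(word));
        }
        return -1;   /* nothing pending */
    }

    int main(void)
    {
        memset(irr, 0, sizeof(irr));
        set_vector(0x31);
        set_vector(0xec);
        printf("highest pending vector: 0x%x\n", find_highest_pending());  /* 0xec */
        return 0;
    }
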
407 static inline void apic_set_isr(int vec, struct kvm_lapic *apic) in apic_set_isr() argument
411 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) in apic_set_isr()
414 vcpu = apic->vcpu; in apic_set_isr()
424 ++apic->isr_count; in apic_set_isr()
425 BUG_ON(apic->isr_count > MAX_APIC_VECTOR); in apic_set_isr()
431 apic->highest_isr_cache = vec; in apic_set_isr()
435 static inline int apic_find_highest_isr(struct kvm_lapic *apic) in apic_find_highest_isr() argument
443 if (!apic->isr_count) in apic_find_highest_isr()
445 if (likely(apic->highest_isr_cache != -1)) in apic_find_highest_isr()
446 return apic->highest_isr_cache; in apic_find_highest_isr()
448 result = find_highest_vector(apic->regs + APIC_ISR); in apic_find_highest_isr()
454 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) in apic_clear_isr() argument
457 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) in apic_clear_isr()
460 vcpu = apic->vcpu; in apic_clear_isr()
471 apic_find_highest_isr(apic)); in apic_clear_isr()
473 --apic->isr_count; in apic_clear_isr()
474 BUG_ON(apic->isr_count < 0); in apic_clear_isr()
475 apic->highest_isr_cache = -1; in apic_clear_isr()
490 highest_irr = apic_find_highest_irr(vcpu->arch.apic); in kvm_lapic_find_highest_irr()
495 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
502 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_irq() local
504 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector, in kvm_apic_set_irq()
556 static void apic_update_ppr(struct kvm_lapic *apic) in apic_update_ppr() argument
561 old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI); in apic_update_ppr()
562 tpr = kvm_apic_get_reg(apic, APIC_TASKPRI); in apic_update_ppr()
563 isr = apic_find_highest_isr(apic); in apic_update_ppr()
572 apic, ppr, isr, isrv); in apic_update_ppr()
575 apic_set_reg(apic, APIC_PROCPRI, ppr); in apic_update_ppr()
577 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in apic_update_ppr()
581 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) in apic_set_tpr() argument
583 apic_set_reg(apic, APIC_TASKPRI, tpr); in apic_set_tpr()
584 apic_update_ppr(apic); in apic_set_tpr()
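
apic_update_ppr() applies the architectural processor-priority rule: compare the priority class (upper nibble) of TPR with that of the highest in-service vector; the PPR is the full TPR if its class wins, otherwise the in-service class with a zero sub-class. A hedged sketch of that rule outside the kernel:

    #include <stdint.h>
    #include <stdio.h>

    /* ppr = tpr               if class(tpr) >= class(isrv)
     *     = class(isrv) << 4  otherwise
     * where class(x) = x >> 4 and isrv is the highest ISR vector (0 if none). */
    static uint32_t compute_ppr(uint32_t tpr, int highest_isr)
    {
        uint32_t isrv = (highest_isr >= 0) ? (uint32_t)highest_isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
            return tpr & 0xff;
        return isrv & 0xf0;
    }

    int main(void)
    {
        printf("0x%02x\n", compute_ppr(0x20, 0x31));  /* ISR class wins: 0x30 */
        printf("0x%02x\n", compute_ppr(0x80, 0x31));  /* TPR wins: 0x80 */
        printf("0x%02x\n", compute_ppr(0x10, -1));    /* nothing in service: 0x10 */
        return 0;
    }
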
587 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda) in kvm_apic_broadcast() argument
589 if (apic_x2apic_mode(apic)) in kvm_apic_broadcast()
595 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda) in kvm_apic_match_physical_addr() argument
597 if (kvm_apic_broadcast(apic, mda)) in kvm_apic_match_physical_addr()
600 if (apic_x2apic_mode(apic)) in kvm_apic_match_physical_addr()
601 return mda == kvm_apic_id(apic); in kvm_apic_match_physical_addr()
603 return mda == SET_APIC_DEST_FIELD(kvm_apic_id(apic)); in kvm_apic_match_physical_addr()
606 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda) in kvm_apic_match_logical_addr() argument
610 if (kvm_apic_broadcast(apic, mda)) in kvm_apic_match_logical_addr()
613 logical_id = kvm_apic_get_reg(apic, APIC_LDR); in kvm_apic_match_logical_addr()
615 if (apic_x2apic_mode(apic)) in kvm_apic_match_logical_addr()
622 switch (kvm_apic_get_reg(apic, APIC_DFR)) { in kvm_apic_match_logical_addr()
630 apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR)); in kvm_apic_match_logical_addr()
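
Logical-mode matching depends on APIC_DFR: in flat mode the 8-bit MDA is a bitmap ANDed against LDR bits 31:24; in cluster mode the upper nibble of the MDA must equal the LDR's cluster and the lower nibbles must intersect. A standalone xAPIC-only sketch of the two cases, with the field layout taken from the SDM:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* ldr_hi is LDR bits 31:24; mda is the 8-bit message destination from
     * ICR2 bits 31:24 (0xff means broadcast, handled before this point). */
    static bool match_flat(uint8_t ldr_hi, uint8_t mda)
    {
        return (ldr_hi & mda) != 0;
    }

    static bool match_cluster(uint8_t ldr_hi, uint8_t mda)
    {
        /* upper nibble: cluster number must match; lower nibble: member bitmap */
        return (ldr_hi >> 4) == (mda >> 4) && (ldr_hi & mda & 0xf) != 0;
    }

    int main(void)
    {
        printf("flat    0x0c vs 0x04 -> %d\n", match_flat(0x0c, 0x04));     /* 1 */
        printf("cluster 0x12 vs 0x13 -> %d\n", match_cluster(0x12, 0x13));  /* 1 */
        printf("cluster 0x12 vs 0x23 -> %d\n", match_cluster(0x12, 0x23));  /* 0 */
        return 0;
    }
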
654 struct kvm_lapic *target = vcpu->arch.apic; in kvm_apic_match_dest()
823 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, in __apic_accept_irq() argument
828 struct kvm_vcpu *vcpu = apic->vcpu; in __apic_accept_irq()
840 if (unlikely(!apic_enabled(apic))) in __apic_accept_irq()
848 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) { in __apic_accept_irq()
850 apic_set_vector(vector, apic->regs + APIC_TMR); in __apic_accept_irq()
852 apic_clear_vector(vector, apic->regs + APIC_TMR); in __apic_accept_irq()
858 apic_set_irr(vector, apic); in __apic_accept_irq()
888 apic->pending_events = (1UL << KVM_APIC_INIT); in __apic_accept_irq()
904 apic->sipi_vector = vector; in __apic_accept_irq()
907 set_bit(KVM_APIC_SIPI, &apic->pending_events); in __apic_accept_irq()
933 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector) in kvm_ioapic_handles_vector() argument
935 return test_bit(vector, (ulong *)apic->vcpu->arch.eoi_exit_bitmap); in kvm_ioapic_handles_vector()
938 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) in kvm_ioapic_send_eoi() argument
943 if (!kvm_ioapic_handles_vector(apic, vector)) in kvm_ioapic_send_eoi()
947 if (irqchip_split(apic->vcpu->kvm)) { in kvm_ioapic_send_eoi()
948 apic->vcpu->arch.pending_ioapic_eoi = vector; in kvm_ioapic_send_eoi()
949 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu); in kvm_ioapic_send_eoi()
953 if (apic_test_vector(vector, apic->regs + APIC_TMR)) in kvm_ioapic_send_eoi()
958 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode); in kvm_ioapic_send_eoi()
961 static int apic_set_eoi(struct kvm_lapic *apic) in apic_set_eoi() argument
963 int vector = apic_find_highest_isr(apic); in apic_set_eoi()
965 trace_kvm_eoi(apic, vector); in apic_set_eoi()
974 apic_clear_isr(vector, apic); in apic_set_eoi()
975 apic_update_ppr(apic); in apic_set_eoi()
977 kvm_ioapic_send_eoi(apic, vector); in apic_set_eoi()
978 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in apic_set_eoi()
988 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_eoi_accelerated() local
990 trace_kvm_eoi(apic, vector); in kvm_apic_set_eoi_accelerated()
992 kvm_ioapic_send_eoi(apic, vector); in kvm_apic_set_eoi_accelerated()
993 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in kvm_apic_set_eoi_accelerated()
997 static void apic_send_ipi(struct kvm_lapic *apic) in apic_send_ipi() argument
999 u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR); in apic_send_ipi()
1000 u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2); in apic_send_ipi()
1010 if (apic_x2apic_mode(apic)) in apic_send_ipi()
1025 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); in apic_send_ipi()
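
apic_send_ipi() unpacks the interrupt command register into an IPI description: vector in bits 7:0, delivery mode in bits 10:8, destination mode in bit 11, level and trigger in bits 14/15, shorthand in bits 19:18, and the destination in ICR2 (bits 31:24 in xAPIC mode, the full 32 bits in x2APIC mode). A hedged decoder sketch with hypothetical field names, not the kernel's struct kvm_lapic_irq:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    struct ipi {                      /* illustrative, mirrors the ICR fields */
        uint8_t  vector;              /* bits 7:0   */
        uint8_t  delivery_mode;       /* bits 10:8  */
        bool     logical_dest;        /* bit 11     */
        bool     level_assert;        /* bit 14     */
        bool     level_triggered;     /* bit 15     */
        uint8_t  shorthand;           /* bits 19:18 */
        uint32_t dest;                /* ICR2       */
    };

    static struct ipi decode_icr(uint32_t icr_low, uint32_t icr_high, bool x2apic)
    {
        struct ipi ipi = {
            .vector          = icr_low & 0xff,
            .delivery_mode   = (icr_low >> 8) & 0x7,
            .logical_dest    = (icr_low >> 11) & 1,
            .level_assert    = (icr_low >> 14) & 1,
            .level_triggered = (icr_low >> 15) & 1,
            .shorthand       = (icr_low >> 18) & 0x3,
            .dest            = x2apic ? icr_high : (icr_high >> 24),
        };
        return ipi;
    }

    int main(void)
    {
        /* fixed IPI, vector 0xfd, physical destination 2, no shorthand */
        struct ipi ipi = decode_icr(0x000000fd, 2u << 24, false);
        printf("vector 0x%x -> dest %u\n", ipi.vector, (unsigned)ipi.dest);
        return 0;
    }
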
1028 static u32 apic_get_tmcct(struct kvm_lapic *apic) in apic_get_tmcct() argument
1034 ASSERT(apic != NULL); in apic_get_tmcct()
1037 if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || in apic_get_tmcct()
1038 apic->lapic_timer.period == 0) in apic_get_tmcct()
1041 remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); in apic_get_tmcct()
1045 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period); in apic_get_tmcct()
1047 (APIC_BUS_CYCLE_NS * apic->divide_count)); in apic_get_tmcct()
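
The current-count register is not stored anywhere; apic_get_tmcct() recomputes it from the hrtimer's remaining time, the cached period, and the divide configuration: tmcct = remaining_ns / (bus_cycle_ns * divide_count), with the remaining time wrapped modulo the period so a periodic timer reads sensibly. A standalone arithmetic sketch (KVM defines APIC_BUS_CYCLE_NS as 1; it is kept as a parameter here):

    #include <stdint.h>
    #include <stdio.h>

    /* remaining_ns is clamped at 0 and wrapped into one period before the
     * division, mirroring the mod_64()/div64_u64() steps in the listing. */
    static uint32_t tmcct_from_remaining(int64_t remaining_ns, uint64_t period_ns,
                                         uint64_t bus_cycle_ns, uint32_t divide_count)
    {
        if (period_ns == 0 || divide_count == 0)
            return 0;
        if (remaining_ns < 0)
            remaining_ns = 0;

        uint64_t ns = (uint64_t)remaining_ns % period_ns;
        return (uint32_t)(ns / (bus_cycle_ns * divide_count));
    }

    int main(void)
    {
        /* TMICT 1000000 ticks, divide-by-2, 1 ns bus cycle => 2 ms period */
        uint64_t period = 1000000ull * 1 * 2;
        printf("tmcct = %u\n", tmcct_from_remaining(500000, period, 1, 2)); /* 250000 */
        return 0;
    }
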
1052 static void __report_tpr_access(struct kvm_lapic *apic, bool write) in __report_tpr_access() argument
1054 struct kvm_vcpu *vcpu = apic->vcpu; in __report_tpr_access()
1062 static inline void report_tpr_access(struct kvm_lapic *apic, bool write) in report_tpr_access() argument
1064 if (apic->vcpu->arch.tpr_access_reporting) in report_tpr_access()
1065 __report_tpr_access(apic, write); in report_tpr_access()
1068 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset) in __apic_read() argument
1077 if (apic_x2apic_mode(apic)) in __apic_read()
1078 val = kvm_apic_id(apic); in __apic_read()
1080 val = kvm_apic_id(apic) << 24; in __apic_read()
1087 if (apic_lvtt_tscdeadline(apic)) in __apic_read()
1090 val = apic_get_tmcct(apic); in __apic_read()
1093 apic_update_ppr(apic); in __apic_read()
1094 val = kvm_apic_get_reg(apic, offset); in __apic_read()
1097 report_tpr_access(apic, false); in __apic_read()
1100 val = kvm_apic_get_reg(apic, offset); in __apic_read()
1112 static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len, in apic_reg_read() argument
1132 result = __apic_read(apic, offset & ~0xf); in apic_reg_read()
1150 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) in apic_mmio_in_range() argument
1152 return kvm_apic_hw_enabled(apic) && in apic_mmio_in_range()
1153 addr >= apic->base_address && in apic_mmio_in_range()
1154 addr < apic->base_address + LAPIC_MMIO_LENGTH; in apic_mmio_in_range()
1160 struct kvm_lapic *apic = to_lapic(this); in apic_mmio_read() local
1161 u32 offset = address - apic->base_address; in apic_mmio_read()
1163 if (!apic_mmio_in_range(apic, address)) in apic_mmio_read()
1166 apic_reg_read(apic, offset, len, data); in apic_mmio_read()
1171 static void update_divide_count(struct kvm_lapic *apic) in update_divide_count() argument
1175 tdcr = kvm_apic_get_reg(apic, APIC_TDCR); in update_divide_count()
1178 apic->divide_count = 0x1 << (tmp2 & 0x7); in update_divide_count()
1181 apic->divide_count); in update_divide_count()
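
update_divide_count() decodes the divide-configuration register: the divide value lives in bits 0, 1, and 3 of TDCR and encodes powers of two from 2 up to 128, with the all-ones pattern meaning divide-by-1. A hedged sketch of that decode, matching the register layout in the SDM:

    #include <stdint.h>
    #include <stdio.h>

    /* TDCR bits 0,1,3 select the timer divider: 0b000->2, 0b001->4, ...,
     * 0b110->128, 0b111->1 (the +1 followed by &7 wraps 8 back to 1<<0). */
    static uint32_t divide_count_from_tdcr(uint32_t tdcr)
    {
        uint32_t bits = tdcr & 0xf;
        uint32_t sel  = ((bits & 0x3) | ((bits & 0x8) >> 1)) + 1;

        return 1u << (sel & 0x7);
    }

    int main(void)
    {
        printf("tdcr 0x0 -> /%u\n", divide_count_from_tdcr(0x0));  /* 2  */
        printf("tdcr 0x3 -> /%u\n", divide_count_from_tdcr(0x3));  /* 16 */
        printf("tdcr 0xb -> /%u\n", divide_count_from_tdcr(0xb));  /* 1  */
        return 0;
    }
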
1184 static void apic_update_lvtt(struct kvm_lapic *apic) in apic_update_lvtt() argument
1186 u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) & in apic_update_lvtt()
1187 apic->lapic_timer.timer_mode_mask; in apic_update_lvtt()
1189 if (apic->lapic_timer.timer_mode != timer_mode) { in apic_update_lvtt()
1190 apic->lapic_timer.timer_mode = timer_mode; in apic_update_lvtt()
1191 hrtimer_cancel(&apic->lapic_timer.timer); in apic_update_lvtt()
1195 static void apic_timer_expired(struct kvm_lapic *apic) in apic_timer_expired() argument
1197 struct kvm_vcpu *vcpu = apic->vcpu; in apic_timer_expired()
1199 struct kvm_timer *ktimer = &apic->lapic_timer; in apic_timer_expired()
1201 if (atomic_read(&apic->lapic_timer.pending)) in apic_timer_expired()
1204 atomic_inc(&apic->lapic_timer.pending); in apic_timer_expired()
1210 if (apic_lvtt_tscdeadline(apic)) in apic_timer_expired()
1221 struct kvm_lapic *apic = vcpu->arch.apic; in lapic_timer_int_injected() local
1222 u32 reg = kvm_apic_get_reg(apic, APIC_LVTT); in lapic_timer_int_injected()
1224 if (kvm_apic_hw_enabled(apic)) { in lapic_timer_int_injected()
1226 void *bitmap = apic->regs + APIC_ISR; in lapic_timer_int_injected()
1229 bitmap = apic->regs + APIC_IRR; in lapic_timer_int_injected()
1239 struct kvm_lapic *apic = vcpu->arch.apic; in wait_lapic_expire() local
1245 if (apic->lapic_timer.expired_tscdeadline == 0) in wait_lapic_expire()
1251 tsc_deadline = apic->lapic_timer.expired_tscdeadline; in wait_lapic_expire()
1252 apic->lapic_timer.expired_tscdeadline = 0; in wait_lapic_expire()
1261 static void start_apic_timer(struct kvm_lapic *apic) in start_apic_timer() argument
1265 atomic_set(&apic->lapic_timer.pending, 0); in start_apic_timer()
1267 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { in start_apic_timer()
1269 now = apic->lapic_timer.timer.base->get_time(); in start_apic_timer()
1270 apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT) in start_apic_timer()
1271 * APIC_BUS_CYCLE_NS * apic->divide_count; in start_apic_timer()
1273 if (!apic->lapic_timer.period) in start_apic_timer()
1280 if (apic_lvtt_period(apic)) { in start_apic_timer()
1283 if (apic->lapic_timer.period < min_period) { in start_apic_timer()
1287 apic->vcpu->vcpu_id, in start_apic_timer()
1288 apic->lapic_timer.period, min_period); in start_apic_timer()
1289 apic->lapic_timer.period = min_period; in start_apic_timer()
1293 hrtimer_start(&apic->lapic_timer.timer, in start_apic_timer()
1294 ktime_add_ns(now, apic->lapic_timer.period), in start_apic_timer()
1302 kvm_apic_get_reg(apic, APIC_TMICT), in start_apic_timer()
1303 apic->lapic_timer.period, in start_apic_timer()
1305 apic->lapic_timer.period))); in start_apic_timer()
1306 } else if (apic_lvtt_tscdeadline(apic)) { in start_apic_timer()
1308 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; in start_apic_timer()
1311 struct kvm_vcpu *vcpu = apic->vcpu; in start_apic_timer()
1320 now = apic->lapic_timer.timer.base->get_time(); in start_apic_timer()
1327 hrtimer_start(&apic->lapic_timer.timer, in start_apic_timer()
1330 apic_timer_expired(apic); in start_apic_timer()
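
For one-shot and periodic modes, start_apic_timer() converts the initial-count register into nanoseconds (TMICT * bus cycle * divide count) and arms an hrtimer relative to "now"; in TSC-deadline mode it converts the remaining guest TSC ticks into nanoseconds instead and expires immediately if the deadline has already passed. A small sketch of the period computation and the expiry decision, assuming a 1 ns bus cycle as above:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    #define BUS_CYCLE_NS 1ull

    /* One-shot/periodic: expiry is now + TMICT * bus_cycle * divide. */
    static uint64_t oneshot_expiry_ns(uint64_t now_ns, uint32_t tmict, uint32_t divide)
    {
        return now_ns + (uint64_t)tmict * BUS_CYCLE_NS * divide;
    }

    /* TSC-deadline: a deadline at or before the current guest TSC has
     * already expired and should fire without arming a timer. */
    static bool tscdeadline_expired(uint64_t guest_tsc, uint64_t deadline)
    {
        return deadline <= guest_tsc;
    }

    int main(void)
    {
        printf("expiry at %llu ns\n",
               (unsigned long long)oneshot_expiry_ns(1000, 100000, 4));
        printf("deadline passed: %d\n", tscdeadline_expired(5000, 4000));
        return 0;
    }
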
1336 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) in apic_manage_nmi_watchdog() argument
1340 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) { in apic_manage_nmi_watchdog()
1341 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode; in apic_manage_nmi_watchdog()
1344 "for cpu %d\n", apic->vcpu->vcpu_id); in apic_manage_nmi_watchdog()
1345 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); in apic_manage_nmi_watchdog()
1347 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); in apic_manage_nmi_watchdog()
1351 static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) in apic_reg_write() argument
1359 if (!apic_x2apic_mode(apic)) in apic_reg_write()
1360 kvm_apic_set_id(apic, val >> 24); in apic_reg_write()
1366 report_tpr_access(apic, true); in apic_reg_write()
1367 apic_set_tpr(apic, val & 0xff); in apic_reg_write()
1371 apic_set_eoi(apic); in apic_reg_write()
1375 if (!apic_x2apic_mode(apic)) in apic_reg_write()
1376 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK); in apic_reg_write()
1382 if (!apic_x2apic_mode(apic)) { in apic_reg_write()
1383 apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF); in apic_reg_write()
1384 recalculate_apic_map(apic->vcpu->kvm); in apic_reg_write()
1391 if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI) in apic_reg_write()
1393 apic_set_spiv(apic, val & mask); in apic_reg_write()
1399 lvt_val = kvm_apic_get_reg(apic, in apic_reg_write()
1401 apic_set_reg(apic, APIC_LVTT + 0x10 * i, in apic_reg_write()
1404 apic_update_lvtt(apic); in apic_reg_write()
1405 atomic_set(&apic->lapic_timer.pending, 0); in apic_reg_write()
1412 apic_set_reg(apic, APIC_ICR, val & ~(1 << 12)); in apic_reg_write()
1413 apic_send_ipi(apic); in apic_reg_write()
1417 if (!apic_x2apic_mode(apic)) in apic_reg_write()
1419 apic_set_reg(apic, APIC_ICR2, val); in apic_reg_write()
1423 apic_manage_nmi_watchdog(apic, val); in apic_reg_write()
1429 if (!kvm_apic_sw_enabled(apic)) in apic_reg_write()
1433 apic_set_reg(apic, reg, val); in apic_reg_write()
1438 if (!kvm_apic_sw_enabled(apic)) in apic_reg_write()
1440 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); in apic_reg_write()
1441 apic_set_reg(apic, APIC_LVTT, val); in apic_reg_write()
1442 apic_update_lvtt(apic); in apic_reg_write()
1446 if (apic_lvtt_tscdeadline(apic)) in apic_reg_write()
1449 hrtimer_cancel(&apic->lapic_timer.timer); in apic_reg_write()
1450 apic_set_reg(apic, APIC_TMICT, val); in apic_reg_write()
1451 start_apic_timer(apic); in apic_reg_write()
1457 apic_set_reg(apic, APIC_TDCR, val); in apic_reg_write()
1458 update_divide_count(apic); in apic_reg_write()
1462 if (apic_x2apic_mode(apic) && val != 0) { in apic_reg_write()
1469 if (apic_x2apic_mode(apic)) { in apic_reg_write()
1470 apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff)); in apic_reg_write()
1486 struct kvm_lapic *apic = to_lapic(this); in apic_mmio_write() local
1487 unsigned int offset = address - apic->base_address; in apic_mmio_write()
1490 if (!apic_mmio_in_range(apic, address)) in apic_mmio_write()
1511 apic_reg_write(apic, offset & 0xff0, val); in apic_mmio_write()
1519 apic_reg_write(vcpu->arch.apic, APIC_EOI, 0); in kvm_lapic_set_eoi()
1531 apic_reg_read(vcpu->arch.apic, offset, 4, &val); in kvm_apic_write_nodecode()
1534 apic_reg_write(vcpu->arch.apic, offset, val); in kvm_apic_write_nodecode()
1540 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_free_lapic() local
1542 if (!vcpu->arch.apic) in kvm_free_lapic()
1545 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_free_lapic()
1550 if (!apic->sw_enabled) in kvm_free_lapic()
1553 if (apic->regs) in kvm_free_lapic()
1554 free_page((unsigned long)apic->regs); in kvm_free_lapic()
1556 kfree(apic); in kvm_free_lapic()
1567 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_get_lapic_tscdeadline_msr() local
1569 if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || in kvm_get_lapic_tscdeadline_msr()
1570 apic_lvtt_period(apic)) in kvm_get_lapic_tscdeadline_msr()
1573 return apic->lapic_timer.tscdeadline; in kvm_get_lapic_tscdeadline_msr()
1578 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_set_lapic_tscdeadline_msr() local
1580 if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || in kvm_set_lapic_tscdeadline_msr()
1581 apic_lvtt_period(apic)) in kvm_set_lapic_tscdeadline_msr()
1584 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_set_lapic_tscdeadline_msr()
1585 apic->lapic_timer.tscdeadline = data; in kvm_set_lapic_tscdeadline_msr()
1586 start_apic_timer(apic); in kvm_set_lapic_tscdeadline_msr()
1591 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_set_tpr() local
1596 apic_set_tpr(apic, ((cr8 & 0x0f) << 4) in kvm_lapic_set_tpr()
1597 | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4)); in kvm_lapic_set_tpr()
1607 tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI); in kvm_lapic_get_cr8()
1615 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_set_base() local
1617 if (!apic) { in kvm_lapic_set_base()
1636 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); in kvm_lapic_set_base()
1642 apic->base_address = apic->vcpu->arch.apic_base & in kvm_lapic_set_base()
1646 apic->base_address != APIC_DEFAULT_PHYS_BASE) in kvm_lapic_set_base()
1651 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address); in kvm_lapic_set_base()
1657 struct kvm_lapic *apic; in kvm_lapic_reset() local
1663 apic = vcpu->arch.apic; in kvm_lapic_reset()
1664 ASSERT(apic != NULL); in kvm_lapic_reset()
1667 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_lapic_reset()
1670 kvm_apic_set_id(apic, vcpu->vcpu_id); in kvm_lapic_reset()
1671 kvm_apic_set_version(apic->vcpu); in kvm_lapic_reset()
1674 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); in kvm_lapic_reset()
1675 apic_update_lvtt(apic); in kvm_lapic_reset()
1677 apic_set_reg(apic, APIC_LVT0, in kvm_lapic_reset()
1679 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); in kvm_lapic_reset()
1681 apic_set_reg(apic, APIC_DFR, 0xffffffffU); in kvm_lapic_reset()
1682 apic_set_spiv(apic, 0xff); in kvm_lapic_reset()
1683 apic_set_reg(apic, APIC_TASKPRI, 0); in kvm_lapic_reset()
1684 if (!apic_x2apic_mode(apic)) in kvm_lapic_reset()
1685 kvm_apic_set_ldr(apic, 0); in kvm_lapic_reset()
1686 apic_set_reg(apic, APIC_ESR, 0); in kvm_lapic_reset()
1687 apic_set_reg(apic, APIC_ICR, 0); in kvm_lapic_reset()
1688 apic_set_reg(apic, APIC_ICR2, 0); in kvm_lapic_reset()
1689 apic_set_reg(apic, APIC_TDCR, 0); in kvm_lapic_reset()
1690 apic_set_reg(apic, APIC_TMICT, 0); in kvm_lapic_reset()
1692 apic_set_reg(apic, APIC_IRR + 0x10 * i, 0); in kvm_lapic_reset()
1693 apic_set_reg(apic, APIC_ISR + 0x10 * i, 0); in kvm_lapic_reset()
1694 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); in kvm_lapic_reset()
1696 apic->irr_pending = kvm_vcpu_apic_vid_enabled(vcpu); in kvm_lapic_reset()
1697 apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0; in kvm_lapic_reset()
1698 apic->highest_isr_cache = -1; in kvm_lapic_reset()
1699 update_divide_count(apic); in kvm_lapic_reset()
1700 atomic_set(&apic->lapic_timer.pending, 0); in kvm_lapic_reset()
1705 apic_update_ppr(apic); in kvm_lapic_reset()
1712 vcpu, kvm_apic_id(apic), in kvm_lapic_reset()
1713 vcpu->arch.apic_base, apic->base_address); in kvm_lapic_reset()
1722 static bool lapic_is_periodic(struct kvm_lapic *apic) in lapic_is_periodic() argument
1724 return apic_lvtt_period(apic); in lapic_is_periodic()
1729 struct kvm_lapic *apic = vcpu->arch.apic; in apic_has_pending_timer() local
1731 if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) && in apic_has_pending_timer()
1732 apic_lvt_enabled(apic, APIC_LVTT)) in apic_has_pending_timer()
1733 return atomic_read(&apic->lapic_timer.pending); in apic_has_pending_timer()
1738 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) in kvm_apic_local_deliver() argument
1740 u32 reg = kvm_apic_get_reg(apic, lvt_type); in kvm_apic_local_deliver()
1743 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { in kvm_apic_local_deliver()
1747 return __apic_accept_irq(apic, mode, vector, 1, trig_mode, in kvm_apic_local_deliver()
1755 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_nmi_wd_deliver() local
1757 if (apic) in kvm_apic_nmi_wd_deliver()
1758 kvm_apic_local_deliver(apic, APIC_LVT0); in kvm_apic_nmi_wd_deliver()
1769 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer); in apic_timer_fn() local
1771 apic_timer_expired(apic); in apic_timer_fn()
1773 if (lapic_is_periodic(apic)) { in apic_timer_fn()
1782 struct kvm_lapic *apic; in kvm_create_lapic() local
1787 apic = kzalloc(sizeof(*apic), GFP_KERNEL); in kvm_create_lapic()
1788 if (!apic) in kvm_create_lapic()
1791 vcpu->arch.apic = apic; in kvm_create_lapic()
1793 apic->regs = (void *)get_zeroed_page(GFP_KERNEL); in kvm_create_lapic()
1794 if (!apic->regs) { in kvm_create_lapic()
1799 apic->vcpu = vcpu; in kvm_create_lapic()
1801 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, in kvm_create_lapic()
1803 apic->lapic_timer.timer.function = apic_timer_fn; in kvm_create_lapic()
1815 kvm_iodevice_init(&apic->dev, &apic_mmio_ops); in kvm_create_lapic()
1819 kfree(apic); in kvm_create_lapic()
1826 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_has_interrupt() local
1829 if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic)) in kvm_apic_has_interrupt()
1832 apic_update_ppr(apic); in kvm_apic_has_interrupt()
1833 highest_irr = apic_find_highest_irr(apic); in kvm_apic_has_interrupt()
1835 ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI))) in kvm_apic_has_interrupt()
1842 u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0); in kvm_apic_accept_pic_intr()
1845 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in kvm_apic_accept_pic_intr()
1855 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_inject_apic_timer_irqs() local
1860 if (atomic_read(&apic->lapic_timer.pending) > 0) { in kvm_inject_apic_timer_irqs()
1861 kvm_apic_local_deliver(apic, APIC_LVTT); in kvm_inject_apic_timer_irqs()
1862 if (apic_lvtt_tscdeadline(apic)) in kvm_inject_apic_timer_irqs()
1863 apic->lapic_timer.tscdeadline = 0; in kvm_inject_apic_timer_irqs()
1864 atomic_set(&apic->lapic_timer.pending, 0); in kvm_inject_apic_timer_irqs()
1871 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_get_apic_interrupt() local
1883 apic_set_isr(vector, apic); in kvm_get_apic_interrupt()
1884 apic_update_ppr(apic); in kvm_get_apic_interrupt()
1885 apic_clear_irr(vector, apic); in kvm_get_apic_interrupt()
1892 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_post_state_restore() local
1896 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV))); in kvm_apic_post_state_restore()
1897 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); in kvm_apic_post_state_restore()
1899 kvm_apic_set_id(apic, kvm_apic_id(apic)); in kvm_apic_post_state_restore()
1902 apic_update_ppr(apic); in kvm_apic_post_state_restore()
1903 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_apic_post_state_restore()
1904 apic_update_lvtt(apic); in kvm_apic_post_state_restore()
1905 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); in kvm_apic_post_state_restore()
1906 update_divide_count(apic); in kvm_apic_post_state_restore()
1907 start_apic_timer(apic); in kvm_apic_post_state_restore()
1908 apic->irr_pending = true; in kvm_apic_post_state_restore()
1909 apic->isr_count = kvm_x86_ops->hwapic_isr_update ? in kvm_apic_post_state_restore()
1910 1 : count_vectors(apic->regs + APIC_ISR); in kvm_apic_post_state_restore()
1911 apic->highest_isr_cache = -1; in kvm_apic_post_state_restore()
1914 apic_find_highest_irr(apic)); in kvm_apic_post_state_restore()
1917 apic_find_highest_isr(apic)); in kvm_apic_post_state_restore()
1932 timer = &vcpu->arch.apic->lapic_timer.timer; in __kvm_migrate_apic_timer()
1945 struct kvm_lapic *apic) in apic_sync_pv_eoi_from_guest() argument
1970 vector = apic_set_eoi(apic); in apic_sync_pv_eoi_from_guest()
1971 trace_kvm_pv_eoi(apic, vector); in apic_sync_pv_eoi_from_guest()
1979 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); in kvm_lapic_sync_from_vapic()
1984 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, in kvm_lapic_sync_from_vapic()
1988 apic_set_tpr(vcpu->arch.apic, data & 0xff); in kvm_lapic_sync_from_vapic()
1998 struct kvm_lapic *apic) in apic_sync_pv_eoi_to_guest() argument
2002 apic->irr_pending || in apic_sync_pv_eoi_to_guest()
2004 apic->highest_isr_cache == -1 || in apic_sync_pv_eoi_to_guest()
2006 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) { in apic_sync_pv_eoi_to_guest()
2014 pv_eoi_set_pending(apic->vcpu); in apic_sync_pv_eoi_to_guest()
2021 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_sync_to_vapic() local
2023 apic_sync_pv_eoi_to_guest(vcpu, apic); in kvm_lapic_sync_to_vapic()
2028 tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff; in kvm_lapic_sync_to_vapic()
2029 max_irr = apic_find_highest_irr(apic); in kvm_lapic_sync_to_vapic()
2032 max_isr = apic_find_highest_isr(apic); in kvm_lapic_sync_to_vapic()
2037 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, in kvm_lapic_sync_to_vapic()
2045 &vcpu->arch.apic->vapic_cache, in kvm_lapic_set_vapic_addr()
2053 vcpu->arch.apic->vapic_addr = vapic_addr; in kvm_lapic_set_vapic_addr()
2059 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_x2apic_msr_write() local
2062 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) in kvm_x2apic_msr_write()
2070 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); in kvm_x2apic_msr_write()
2071 return apic_reg_write(apic, reg, (u32)data); in kvm_x2apic_msr_write()
2076 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_x2apic_msr_read() local
2079 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic)) in kvm_x2apic_msr_read()
2088 if (apic_reg_read(apic, reg, 4, &low)) in kvm_x2apic_msr_read()
2091 apic_reg_read(apic, APIC_ICR2, 4, &high); in kvm_x2apic_msr_read()
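
kvm_x2apic_msr_{read,write}() map the x2APIC MSR space back onto the MMIO register layout: MSRs 0x800-0x8ff correspond to register offset (msr - 0x800) << 4, and the 64-bit ICR MSR is split so that its high half lands in APIC_ICR2. A hedged sketch of that mapping (constants per the SDM, helper names invented):

    #include <stdint.h>
    #include <stdio.h>

    #define APIC_BASE_MSR 0x800u
    #define APIC_ICR      0x300u
    #define APIC_ICR2     0x310u

    /* Translate an x2APIC MSR number to the xAPIC MMIO register offset. */
    static uint32_t x2apic_msr_to_offset(uint32_t msr)
    {
        return (msr - APIC_BASE_MSR) << 4;
    }

    /* Split a 64-bit x2APIC ICR write into the two 32-bit xAPIC registers. */
    static void split_icr(uint64_t data, uint32_t *icr_low, uint32_t *icr_high)
    {
        *icr_low  = (uint32_t)data;           /* goes to APIC_ICR  */
        *icr_high = (uint32_t)(data >> 32);   /* goes to APIC_ICR2 */
    }

    int main(void)
    {
        uint32_t lo, hi;

        printf("MSR 0x830 -> offset 0x%x (ICR)\n", x2apic_msr_to_offset(0x830));
        split_icr(0x0000000200000000ull | 0xfd, &lo, &hi);
        printf("icr_low 0x%x icr_high 0x%x\n", lo, hi);
        return 0;
    }
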
2100 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_hv_vapic_msr_write() local
2107 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); in kvm_hv_vapic_msr_write()
2108 return apic_reg_write(apic, reg, (u32)data); in kvm_hv_vapic_msr_write()
2113 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_hv_vapic_msr_read() local
2119 if (apic_reg_read(apic, reg, 4, &low)) in kvm_hv_vapic_msr_read()
2122 apic_reg_read(apic, APIC_ICR2, 4, &high); in kvm_hv_vapic_msr_read()
2144 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_accept_events() local
2148 if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) in kvm_apic_accept_events()
2158 if (test_bit(KVM_APIC_SIPI, &apic->pending_events)) in kvm_apic_accept_events()
2159 clear_bit(KVM_APIC_SIPI, &apic->pending_events); in kvm_apic_accept_events()
2163 pe = xchg(&apic->pending_events, 0); in kvm_apic_accept_events()
2167 if (kvm_vcpu_is_bsp(apic->vcpu)) in kvm_apic_accept_events()
2176 sipi_vector = apic->sipi_vector; in kvm_apic_accept_events()