Lines matching references to vcpu in KVM's x86 helper header (arch/x86/kvm/x86.h).

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
        bool soft)
{
        vcpu->arch.interrupt.pending = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.pending = false;
}

/* True if a queued exception, interrupt or NMI still awaits delivery. */
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
                vcpu->arch.nmi_injected;
}

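The helpers above keep per-vCPU event state that the injection path consults on every VM entry. The sketch below is a hedged illustration of the usage pattern, not the actual injection logic in x86.c; the function name and vector are hypothetical.

static void example_deliver_event(struct kvm_vcpu *vcpu)
{
        /* An event queued on a previous exit but not yet delivered
         * must be reinjected before any new event is considered. */
        if (kvm_event_needs_reinjection(vcpu))
                return;

        /* Queue a hypothetical external (hard) interrupt, vector 0x20. */
        kvm_queue_interrupt(vcpu, 0x20, false);
}
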
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.efer & EFER_LMA;
#else
        return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
        int cs_db, cs_l;

        if (!is_long_mode(vcpu))
                return false;
        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
        return cs_l;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

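Because these predicates just read guest control-register bits, they compose into the usual x86 paging-mode decision ladder. The following sketch is illustrative only; the enum and helper are hypothetical, not part of the source.

enum example_paging_mode { PM_NONE, PM_32BIT, PM_PAE, PM_LONG };

static enum example_paging_mode example_guest_paging_mode(struct kvm_vcpu *vcpu)
{
        if (!is_paging(vcpu))
                return PM_NONE;         /* CR0.PG clear: no translation */
        if (is_long_mode(vcpu))
                return PM_LONG;         /* EFER.LMA set: 4-level paging */
        if (is_pae(vcpu))
                return PM_PAE;          /* CR4.PAE set: 3-level paging */
        return PM_32BIT;                /* legacy 2-level paging */
}
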
/*
 * Cache the translation of the most recent MMIO access, tagged with the
 * current memslot generation so the entry goes stale automatically when
 * the memslots change.
 */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        vcpu->arch.mmio_gva = gva & PAGE_MASK;
        vcpu->arch.access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

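Passing MMIO_GVA_ANY skips the address comparison, turning the helper into an unconditional flush. A minimal sketch of that use; the caller name is hypothetical:

static void example_flush_mmio_cache(struct kvm_vcpu *vcpu)
{
        /* Wildcard: drop the cached MMIO translation entirely, e.g.
         * when the guest's TLB state is invalidated. */
        vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
}
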
static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
              vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
              vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

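Together the two match helpers give the emulator a fast path: if a faulting access hits the single-entry cache and the memslot generation still matches, the cached access bits can be reused without a fresh page-table walk. A hedged sketch, with a hypothetical caller that already has both addresses:

static bool example_mmio_fast_path(struct kvm_vcpu *vcpu, unsigned long gva,
                                   gpa_t gpa)
{
        /* A stale memslot generation fails both checks implicitly,
         * because each helper calls vcpu_match_mmio_gen() first. */
        if (vcpu_match_mmio_gva(vcpu, gva) || vcpu_match_mmio_gpa(vcpu, gpa))
                return true;    /* reuse vcpu->arch.access, skip the walk */

        return false;
}
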
/*
 * Outside 64-bit mode the general-purpose registers are architecturally
 * 32 bits wide, so reads and writes are truncated to u32.
 */
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        unsigned long val = kvm_register_read(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
                                       enum kvm_reg reg,
                                       unsigned long val)
{
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
        return kvm_register_write(vcpu, reg, val);
}

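A small illustration of the truncation behaviour; the function and the value written are hypothetical:

static void example_register_width(struct kvm_vcpu *vcpu)
{
        unsigned long val;

        kvm_register_writel(vcpu, VCPU_REGS_RAX, 0x123456789abcdef0UL);

        /* In 64-bit mode this reads back the full value; in any other
         * mode both the write and the read were truncated to
         * 0x9abcdef0 by the is_64_bit_mode() checks above. */
        val = kvm_register_readl(vcpu, VCPU_REGS_RAX);
        (void)val;
}
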
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);

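The MTRR interface above is consumed mainly when KVM computes the effective memory type of a guest frame. A hedged usage sketch; the wrapper is hypothetical, and MTRR_TYPE_WRBACK is the standard write-back type from asm/mtrr.h:

static bool example_gfn_is_writeback(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        /* Ask the guest's virtualized MTRR state for this frame's type. */
        return kvm_mtrr_get_guest_memory_type(vcpu, gfn) == MTRR_TYPE_WRBACK;
}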